ngram
listlengths
0
67.8k
[ "django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ] operations = [ migrations.AddField(", "migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField(", "), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ),", "] operations = [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created',", "model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels',", "migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), ]", "migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField(", "verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', 
field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),", "Django 2.2.1 on 2020-10-08 10:05 from django.db import migrations, models import django.utils.timezone class", "name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated',", "model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment',", "verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'),", "on 2020-10-08 10:05 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies", "migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField(", "verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_updated', field=models.DateTimeField(auto_now=True, 
verbose_name='更新时间'),", "2020-10-08 10:05 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies =", "verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),", "# Generated by Django 2.2.1 on 2020-10-08 10:05 from django.db import migrations, models", "django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'),", "migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ] operations", "import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ] operations = [", "'0004_auto_20201003_2247'), ] operations = [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article',", "= [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),", "dependencies = [ ('blog', '0004_auto_20201003_2247'), ] operations = [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True,", "[ ('blog', '0004_auto_20201003_2247'), ] operations = [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ),", "Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ] 
operations = [ migrations.AddField( model_name='channel', name='updated',", "field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True,", "), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ),", "model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel',", "), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ),", "field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True,", "), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ),", "('blog', '0004_auto_20201003_2247'), ] operations = [ migrations.AddField( 
model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField(", "field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now,", "model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels',", "operations = [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now,", "= [ ('blog', '0004_auto_20201003_2247'), ] operations = [ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'),", "name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created',", "from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog',", "field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_updated', 
field=models.DateTimeField(auto_now=True,", "field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now,", "[ migrations.AddField( model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ),", "2.2.1 on 2020-10-08 10:05 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ] operations = [ migrations.AddField( model_name='channel',", "), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ),", "migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField(", "10:05 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [", "name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created',", "models import 
django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ] operations =", "name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_updated',", "model_name='channel', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article',", "verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),", "Generated by Django 2.2.1 on 2020-10-08 10:05 from django.db import migrations, models import", "model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment',", "migrations.AlterField( model_name='channel', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField(", "field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, 
verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now,", "), migrations.AlterField( model_name='tagsmodels', name='tags_created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='tagsmodels', name='tags_updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ),", "by Django 2.2.1 on 2020-10-08 10:05 from django.db import migrations, models import django.utils.timezone", "name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated',", "migrations.AlterField( model_name='article', name='created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='article', name='updated', field=models.DateTimeField(auto_now=True, verbose_name='修改时间'), ), migrations.AlterField(", "verbose_name='修改时间'), ), migrations.AlterField( model_name='attachment', name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),", "name='Created', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'), ), migrations.AlterField( model_name='attachment', name='Updated', field=models.DateTimeField(auto_now=True, verbose_name='更新时间'), ), migrations.AlterField( model_name='channel', name='created',", "import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('blog', '0004_auto_20201003_2247'), ]" ]
[ "[] seen = set() exchange = [] dirty = [] mats = []", "tuple(args) self.kernel = kernel self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg", "access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if", "in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ:", "zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and", "noop if not dirty or iterset.comm.size == 1: self.mark_dirty = noop def g2lbegin(self):", "access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def", "ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset,", "mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode in self.exchange: d.g2lend(mode) def", "= pass_layer_arg exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty", "= [] mats = [] for arg, access_mode in zip(args, access_modes): if arg.argtype", "in self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self):", "assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {}", "*args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes)", "self.reductions = reductions self.mats = mats # Micro-optimisations if not reductions or iterset.comm.size", "zip(args, kernel.access_modes): assert arg.validate(iterset) try: 
assert seen[arg] == access_mode except KeyError: seen[arg] =", "self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for", "tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object): def validator(self, kernel, iterset,", "if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype ==", "*self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty()", "= noop self.reduction_end = noop if not exchange or iterset.comm.size == 1: self.g2lbegin", "arg, access_mode in zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple !=", "arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist = [] seen", "seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property", "== 1: self.mark_dirty = noop def g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode)", "mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode) def", "except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll = get_c_function(wrapper, self.c_argtypes) return self.code_cache.setdefault(key,", "noop if not exchange or iterset.comm.size == 1: self.g2lbegin = noop self.g2lend =", "1: self.g2lbegin = noop self.g2lend = noop self.l2gbegin = noop self.l2gend = noop", "= [] seen = set() for arg in self.args: arglist += arg._parloop_args_ argtypes", "debug_check_args def filter_args(args, access_modes): reductions = [] seen = set() exchange = []", "in self.exchange: d.l2gbegin(mode) def l2gend(self): for d, 
mode in self.exchange: d.l2gend(mode) def reduction_begin(self):", "= noop self.l2gend = noop if not dirty or iterset.comm.size == 1: self.mark_dirty", "exchange self.dirty = dirty self.reductions = reductions self.mats = mats # Micro-optimisations if", "in seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist + tuple(maplist), argtypes + tuple(maptypes)", "def l2gend(self): for d, mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode", "reductions = [] seen = set() exchange = [] dirty = [] mats", "def filter_args(args, access_modes): reductions = [] seen = set() exchange = [] dirty", "= self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist = [] seen =", "== 1: self.reduction_begin = noop self.reduction_end = noop if not exchange or iterset.comm.size", "mats # Micro-optimisations if not reductions or iterset.comm.size == 1: self.reduction_begin = noop", "ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and arg.obj not in seen: exchange.append((arg.obj,", "self.exchange = exchange self.dirty = dirty self.reductions = reductions self.mats = mats #", "self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll = get_c_function(wrapper, self.c_argtypes) return", "filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty = dirty self.reductions = reductions self.mats =", "access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object): def validator(self,", "reductions or iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end = noop if not", "def g2lend(self): for d, mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode", "or iterset.comm.size == 1: self.g2lbegin = noop self.g2lend = noop self.l2gbegin = noop", "# self.mark_dirty() @cached_property def 
_arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes =", "self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange = exchange", "self.dirty: d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size,", "import AbstractSet from pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes): reductions = []", "exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode))", "in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist", "from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property, debug_check_args", "= self.iterset._parloop_argtypes_ maptypes = [] maplist = [] seen = set() for arg", "arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple: for m, t in zip(map_._parloop_args_,", "not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode !=", "= dirty self.reductions = reductions self.mats = mats # Micro-optimisations if not reductions", "= filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty = dirty self.reductions = reductions self.mats", "d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for d, mode in self.exchange: d.l2gend(mode)", "for g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in self.dirty: d.halo_valid", "get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet", "isinstance(iterset, AbstractSet) assert len(args) 
== len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {} for", "in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in self.dirty: d.halo_valid = False def", "@cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property def dll(self): key =", "+= arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple: for m, t in", "= [] maplist = [] seen = set() for arg in self.args: arglist", "def reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in", "== access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args,", "import cached_property, debug_check_args def filter_args(args, access_modes): reductions = [] seen = set() exchange", "from pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet from", "return self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll = get_c_function(wrapper, self.c_argtypes)", "def c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property def dll(self): key = (self.kernel,", "arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass", "self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self):", "seen = set() exchange = [] dirty = [] mats = [] for", "arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode", "_arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist = []", "AbstractSet) assert 
len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {} for arg,", "+ tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self):", "IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes):", "noop self.reduction_end = noop if not exchange or iterset.comm.size == 1: self.g2lbegin =", "*args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel self.iterset = iterset self.iteration_region", "for arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg] == access_mode", "self.mark_dirty() @cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = []", "if m in seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist + tuple(maplist), argtypes", "in zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, )", "import AccessMode, ArgType, IterationRegion from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel", "= noop if not exchange or iterset.comm.size == 1: self.g2lbegin = noop self.g2lend", "def g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode", "maplist = [] seen = set() for arg in self.args: arglist += arg._parloop_args_", "dirty or iterset.comm.size == 1: self.mark_dirty = noop def g2lbegin(self): for d, mode", "pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel self.iterset = iterset self.iteration_region = iteration_region", "AccessMode, ArgType, IterationRegion from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from", "try: return 
self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll = get_c_function(wrapper,", "d in self.dirty: d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend()", "self.l2gend = noop if not dirty or iterset.comm.size == 1: self.mark_dirty = noop", "not reductions or iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end = noop if", "@debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel =", "def mark_dirty(self): for d in self.dirty: d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0,", "argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0]", "for d, mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode in self.exchange:", "= iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats =", "pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes): reductions =", "ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode))", "seen = {} for arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert", "code_cache = {} @cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a", "kernel self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions,", "import Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils 
import", "access_modes): reductions = [] seen = set() exchange = [] dirty = []", "= [] dirty = [] mats = [] for arg, access_mode in zip(args,", "if not exchange or iterset.comm.size == 1: self.g2lbegin = noop self.g2lend = noop", "in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg] == access_mode except KeyError: seen[arg]", "l2gend(self): for d, mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode in", "self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() #", "in arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue", "if not reductions or iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end = noop", "if not dirty or iterset.comm.size == 1: self.mark_dirty = noop def g2lbegin(self): for", "tuple(mats) def noop(): pass class ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False):", "mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode) def", "KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll = get_c_function(wrapper, self.c_argtypes) return self.code_cache.setdefault(key, dll)", "kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args)", "= noop self.g2lend = noop self.l2gbegin = noop self.l2gend = noop if not", "c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property def", "in self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode in self.exchange: d.g2lend(mode) def 
l2gbegin(self):", "Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property,", "self.args = tuple(args) self.kernel = kernel self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg", "pass class ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel)", "IterationRegion from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps import", "from pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes): reductions = [] seen =", "= {} for arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg]", "g2lend(self): for d, mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode in", "= access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args)", "in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self):", "self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in self.dirty: d.halo_valid = False def execute(self):", "continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property def", "isinstance(iteration_region, IterationRegion) seen = {} for arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset)", "@cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region,", "dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and arg.obj not in seen: exchange.append((arg.obj, access_mode))", "exchange = [] dirty = [] mats = [] for arg, access_mode in", "= 
(self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key]", "self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes)", "self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for", "d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist)", "self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll =", "== 1: self.g2lbegin = noop self.g2lend = noop self.l2gbegin = noop self.l2gend =", "self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property def dll(self): key", "access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel", "assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion)", "= reductions self.mats = mats # Micro-optimisations if not reductions or iterset.comm.size ==", "map_._parloop_argtypes_): if m in seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist + tuple(maplist),", "self.mats = mats # Micro-optimisations if not reductions or iterset.comm.size == 1: self.reduction_begin", "reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats)", "self.dirty = dirty 
self.reductions = reductions self.mats = mats # Micro-optimisations if not", "= tuple(args) self.kernel = kernel self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg =", "try: assert seen[arg] == access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self,", "[] seen = set() for arg in self.args: arglist += arg._parloop_args_ argtypes +=", "arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property def", "access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and arg.obj", "== ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and arg.obj not in seen:", "+ tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache", "for arg in self.args: arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in", "= exchange self.dirty = dirty self.reductions = reductions self.mats = mats # Micro-optimisations", "== len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {} for arg, access_mode in zip(args,", "self.kernel = kernel self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange,", "!= AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty),", "iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes) assert", "for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue seen.add(m) maplist.append(m)", "m in seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist + 
tuple(maplist), argtypes +", "assert len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {} for arg, access_mode", "assert seen[arg] == access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel,", "self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist = [] seen = set()", "@cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist", "kernel.access_modes) self.exchange = exchange self.dirty = dirty self.reductions = reductions self.mats = mats", "key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg) try: return", "validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert", "access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg] == access_mode except KeyError:", "return arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property", "Micro-optimisations if not reductions or iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end =", "self.g2lend = noop self.l2gbegin = noop self.l2gend = noop if not dirty or", "[] dirty = [] mats = [] for arg, access_mode in zip(args, access_modes):", "class ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert", "self.l2gbegin = noop self.l2gend = noop if not dirty or iterset.comm.size == 1:", "g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode in", "t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue 
seen.add(m) maplist.append(m) maptypes.append(t) return", "noop self.g2lend = noop self.l2gbegin = noop self.l2gend = noop if not dirty", "tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object): def validator(self, kernel, iterset, *args,", "def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist =", "if arg.map_tuple != (IdentityMap, ) and arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj)", "mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object): def", "self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats", "[] mats = [] for arg, access_mode in zip(args, access_modes): if arg.argtype ==", "zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue seen.add(m) maplist.append(m) maptypes.append(t) return arglist +", "self.iterset._parloop_argtypes_ maptypes = [] maplist = [] seen = set() for arg in", "kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel self.iterset =", "and arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and", "d, mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode)", "def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel", "tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self): return", "argtypes = self.iterset._parloop_argtypes_ maptypes = [] maplist = [] seen = set() for", "ArgType.MAT: mats.append((arg.obj, access_mode)) return 
tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object):", "if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop():", "g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode)", "arg in self.args: arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple:", "iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats = filter_args(args,", "g.reduction_begin(mode) def reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d", "for a in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper =", "self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for", "for arg, access_mode in zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple", "if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and arg.obj not", "arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT:", "d.l2gend(mode) def reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g,", "+= arg._parloop_argtypes_ for map_ in arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if", "self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes =", "build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import", "= iteration_region 
self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange", "d.g2lend(mode) def l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for d,", "access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL,", "self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend()", "from pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes): reductions", "def c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property", "self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self): arglist =", "mats = [] for arg, access_mode in zip(args, access_modes): if arg.argtype == ArgType.DAT:", "self.g2lbegin = noop self.g2lend = noop self.l2gbegin = noop self.l2gend = noop if", "pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils", "arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple: for m, t", "__init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel self.iterset", "noop def g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for d,", "mark_dirty(self): for d in self.dirty: d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size,", "def l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for d, mode", 
"return self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property def dll(self):", "return self._arglist_and_types[0] code_cache = {} @cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_", "seen[arg] == access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel, iterset,", "self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_ maptypes", "g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in self.dirty: d.halo_valid =", "access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange),", "seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args =", "def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg)", "= False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin()", "reductions self.mats = mats # Micro-optimisations if not reductions or iterset.comm.size == 1:", "self.reduction_begin = noop self.reduction_end = noop if not exchange or iterset.comm.size == 1:", "execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end()", "mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode) def", "or 
iterset.comm.size == 1: self.mark_dirty = noop def g2lbegin(self): for d, mode in", "in self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self):", "assert isinstance(iteration_region, IterationRegion) seen = {} for arg, access_mode in zip(args, kernel.access_modes): assert", "def reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode", "a in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2],", "False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() #", "arg.validate(iterset) try: assert seen[arg] == access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator) def", "isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen", "self.args: arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple: for m,", "cached_property, debug_check_args def filter_args(args, access_modes): reductions = [] seen = set() exchange =", "iterset.comm.size == 1: self.mark_dirty = noop def g2lbegin(self): for d, mode in self.exchange:", "iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel self.iterset = iterset self.iteration_region =", "self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property def", "arg._parloop_argtypes_ for map_ in arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m", "@cached_property def c_argtypes(self): return 
self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache = {}", "mats = filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty = dirty self.reductions = reductions", "filter_args(args, access_modes): reductions = [] seen = set() exchange = [] dirty =", ") and arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL", "self.reduction_end = noop if not exchange or iterset.comm.size == 1: self.g2lbegin = noop", "= kernel self.iterset = iterset self.iteration_region = iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty,", "iterset.comm.size == 1: self.g2lbegin = noop self.g2lend = noop self.l2gbegin = noop self.l2gend", "IterationRegion) seen = {} for arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try:", "from pyop3.api import AccessMode, ArgType, IterationRegion from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel", "reduction_end(self): for g, mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in self.dirty:", "self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg)", "== ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj,", "iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end = noop if not exchange or", "dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg) try:", "AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions),", "set() for arg in self.args: 
arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_", "d.g2lbegin(mode) def g2lend(self): for d, mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for d,", "self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for", "= {} @cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in", "# Micro-optimisations if not reductions or iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end", "from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap", "= noop if not dirty or iterset.comm.size == 1: self.mark_dirty = noop def", "seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj,", "iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args) ==", "1: self.reduction_begin = noop self.reduction_end = noop if not exchange or iterset.comm.size ==", "l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for d, mode in", "in self.args: arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple: for", "for map_ in arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in", "len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {} for arg, access_mode in", "in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region,", "for d in self.dirty: d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist)", "g.reduction_end(mode) def 
mark_dirty(self): for d in self.dirty: d.halo_valid = False def execute(self): self.g2lbegin()", "def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin()", "for g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode in self.reductions:", "arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap, ) and arg.obj not in", "= set() exchange = [] dirty = [] mats = [] for arg,", "mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for d, mode in self.exchange: d.l2gend(mode) def", "*self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self): arglist", "KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args", "dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty = dirty self.reductions", "{} for arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg] ==", "and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype == ArgType.MAT: mats.append((arg.obj, access_mode)) return", "or iterset.comm.size == 1: self.reduction_begin = noop self.reduction_end = noop if not exchange", "len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen = {} for arg, access_mode in zip(args, kernel.access_modes):", "= noop self.l2gbegin = noop self.l2gend = noop if not dirty or iterset.comm.size", "= mats # Micro-optimisations if not reductions or iterset.comm.size == 1: self.reduction_begin =", "pyop3.obj.maps import IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property, 
debug_check_args def", "AbstractSet from pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes): reductions = [] seen", "pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes) assert isinstance(iteration_region,", "m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue seen.add(m) maplist.append(m) maptypes.append(t)", "not dirty or iterset.comm.size == 1: self.mark_dirty = noop def g2lbegin(self): for d,", "for d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode in self.exchange:", "for d, mode in self.exchange: d.l2gbegin(mode) def l2gend(self): for d, mode in self.exchange:", "1: self.mark_dirty = noop def g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode) def", "arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen: continue seen.add(m)", "exchange or iterset.comm.size == 1: self.g2lbegin = noop self.g2lend = noop self.l2gbegin =", "pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap from", "= noop def g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for", "d, mode in self.exchange: d.g2lend(mode) def l2gbegin(self): for d, mode in self.exchange: d.l2gbegin(mode)", "import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps import IdentityMap from pyop3.obj.sets", "= [] seen = set() exchange = [] dirty = [] mats =", "self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper = build_wrapper(*key[:-2], iteration_region=self.iteration_region, pass_layer_arg=self.pass_layer_arg) dll", "kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg] == access_mode except KeyError: seen[arg] = access_mode", "noop self.l2gbegin = noop self.l2gend = noop if 
not dirty or iterset.comm.size ==", "mode in self.reductions: g.reduction_end(mode) def mark_dirty(self): for d in self.dirty: d.halo_valid = False", "*(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError: wrapper", "iteration_region self.pass_layer_arg = pass_layer_arg exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange =", "self.mark_dirty = noop def g2lbegin(self): for d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self):", "Kernel) assert isinstance(iterset, AbstractSet) assert len(args) == len(kernel.access_modes) assert isinstance(iteration_region, IterationRegion) seen =", "d.l2gbegin(mode) def l2gend(self): for d, mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for g,", "= set() for arg in self.args: arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_ for", "self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property", "maptypes = [] maplist = [] seen = set() for arg in self.args:", "self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except KeyError:", "except KeyError: seen[arg] = access_mode @debug_check_args(validator) def __init__(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False):", "dirty = [] mats = [] for arg, access_mode in zip(args, access_modes): if", "== ArgType.MAT: mats.append((arg.obj, access_mode)) return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class", "noop self.l2gend = noop if not dirty or iterset.comm.size == 1: self.mark_dirty =", "noop(): pass class ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert 
isinstance(kernel,", "= [] for arg, access_mode in zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj)", "in self.dirty: d.halo_valid = False def execute(self): self.g2lbegin() self.dll(0, self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size,", "set() exchange = [] dirty = [] mats = [] for arg, access_mode", "(IdentityMap, ) and arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype ==", "access_mode in zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if arg.map_tuple != (IdentityMap,", "argtypes += arg._parloop_argtypes_ for map_ in arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_):", "ArgType, IterationRegion from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import Kernel from pyop3.obj.maps", "return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object): def validator(self, kernel,", "c_arglist(self): return self._arglist_and_types[0] code_cache = {} @cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_,", "{} @cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args),", "def noop(): pass class ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert", "maptypes.append(t) return arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1]", "reduction_begin(self): for g, mode in self.reductions: g.reduction_begin(mode) def reduction_end(self): for g, mode in", "not exchange or iterset.comm.size == 1: self.g2lbegin = noop self.g2lend = noop self.l2gbegin", "seen.add(arg.obj) if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ: reductions.append((arg.obj, access_mode)) if arg.argtype", 
"maplist.append(m) maptypes.append(t) return arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self): return", "pyop3.utils import cached_property, debug_check_args def filter_args(args, access_modes): reductions = [] seen = set()", "exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty = dirty", "tuple(maptypes) @cached_property def c_argtypes(self): return self._arglist_and_types[1] @cached_property def c_arglist(self): return self._arglist_and_types[0] code_cache =", "map_ in arg.map_tuple: for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_): if m in seen:", "[] maplist = [] seen = set() for arg in self.args: arglist +=", "d, mode in self.exchange: d.g2lbegin(mode) def g2lend(self): for d, mode in self.exchange: d.g2lend(mode)", "pyop3.api import AccessMode, ArgType, IterationRegion from pyop3.codegen.compiled import build_wrapper, get_c_function from pyop3.obj.kernel import", "dirty self.reductions = reductions self.mats = mats # Micro-optimisations if not reductions or", "self._arglist_and_types[0] code_cache = {} @cached_property def dll(self): key = (self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for", "# self.l2gbegin() self.reduction_end() # self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_", "seen = set() for arg in self.args: arglist += arg._parloop_args_ argtypes += arg._parloop_argtypes_", "iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): self.args = tuple(args) self.kernel = kernel self.iterset = iterset", "def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL, pass_layer_arg=False): assert isinstance(kernel, Kernel) assert isinstance(iterset, AbstractSet)", "for d, mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for g, mode in self.reductions:", "assert arg.validate(iterset) try: assert seen[arg] 
== access_mode except KeyError: seen[arg] = access_mode @debug_check_args(validator)", "reductions, mats = filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty = dirty self.reductions =", "arg.map_tuple != (IdentityMap, ) and arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if", "seen.add(m) maplist.append(m) maptypes.append(t) return arglist + tuple(maplist), argtypes + tuple(maptypes) @cached_property def c_argtypes(self):", "arg, access_mode in zip(args, kernel.access_modes): assert arg.validate(iterset) try: assert seen[arg] == access_mode except", "self.exchange: d.l2gbegin(mode) def l2gend(self): for d, mode in self.exchange: d.l2gend(mode) def reduction_begin(self): for", "[] for arg, access_mode in zip(args, access_modes): if arg.argtype == ArgType.DAT: dirty.append(arg.obj) if", "pass_layer_arg exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes) self.exchange = exchange self.dirty =", "tuple(reductions), tuple(mats) def noop(): pass class ParLoop(object): def validator(self, kernel, iterset, *args, iteration_region=IterationRegion.ALL,", "(self.kernel, self.iterset._codegen_info_, *(a._codegen_info_ for a in self.args), self.iteration_region, self.pass_layer_arg) try: return self.code_cache[key] except", "self.iterset.core_size, *self.c_arglist) self.g2lend() self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist) self.reduction_begin() # self.l2gbegin() self.reduction_end() # self.l2gend() #", "# self.l2gend() # self.mark_dirty() @cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes = self.iterset._parloop_argtypes_", "import IdentityMap from pyop3.obj.sets import AbstractSet from pyop3.utils import cached_property, debug_check_args def filter_args(args,", "!= (IdentityMap, ) and arg.obj not in seen: exchange.append((arg.obj, access_mode)) seen.add(arg.obj) if arg.argtype", "self.l2gbegin() self.reduction_end() # self.l2gend() # 
self.mark_dirty() @cached_property def _arglist_and_types(self): arglist = self.iterset._parloop_args_ argtypes" ]
[ "(top N list) if M is None or n is None: Mtemp, ntemp", "if len(num_post) > 1 or len(num_oot) > 1: raise Exception('Number of posts or", "of OOT detector based on ranked result list.\"\"\" def __init__(self, result, M=None, n=None,", "list') self.result = result self.N = N # of posts taken (top N", "num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot) > 1: raise", "of posts or OOT posts mismatch') if len(num_post) == 0 or len(num_oot) ==", "if M is None or n is None: Mtemp, ntemp = self._get_nums() self.M", "probability mass function of a hypergeometric random variable denoting the number of OOT", "posts or OOT posts mismatch') if len(num_post) == 0 or len(num_oot) == 0:", "of M and n if M < n: raise Exception('M should never be", "is a hypergeometric random variable associated with this event. \"\"\" return min(self.N, self.n)", "is None else M self.n = ntemp if n is None else n", "M self.n = ntemp if n is None else n else: # Check", "self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the", "the probability of getting k OOT posts in the top N list. \"\"\"", "Out-of-topic post detection evaluation methods. \"\"\" from collections import Counter import numpy as", "if M < n: raise Exception('M should never be less than n') self.M,", "\"\"\"Get the number of all and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for", "all and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _, is_oot in subresult)", "self.result = result self.N = N # of posts taken (top N list)", "M is None else M self.n = ntemp if n is None else", "random variable associated with this event. \"\"\" return min(self.N, self.n) @lazyproperty def baseline(self):", "top N list. The k-th element represents the probability of getting k OOT", "baseline performance vector. The baseline is obtaining OOT posts by chance. 
Thus, the", "> 1 or len(num_oot) > 1: raise Exception('Number of posts or OOT posts", "subresult in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if", "vector.\"\"\" num_expr = len(self.result) if num_expr == 0: raise Exception('No experiment error') top_oot_nums", "else n else: # Check validity of M and n if M <", "ntemp = self._get_nums() self.M = Mtemp if M is None else M self.n", "N < 0: raise Exception('Cannot pick negative number of posts in top list')", "getting k OOT posts in the top N list. \"\"\" rv = hypergeom(self.M,", "Check validity of M and n if M < n: raise Exception('M should", "hypergeometric random variable denoting the number of OOT posts in the top N", "n=None, N=1): if N < 0: raise Exception('Cannot pick negative number of posts", "a hypergeometric random variable denoting the number of OOT posts in the top", "M < n: raise Exception('M should never be less than n') self.M, self.n", "- self.min_sup + 1 res = np.zeros(length) count = Counter(top_oot_nums) for k in", "numpy as np from scipy.stats import hypergeom from otdet.util import lazyproperty class TopListEvaluator:", "methods. \"\"\" from collections import Counter import numpy as np from scipy.stats import", "def _get_nums(self): \"\"\"Get the number of all and OOT posts.\"\"\" def get_num_oot(subresult): return", "\"\"\"Evaluate performance of OOT detector based on ranked result list.\"\"\" def __init__(self, result,", "the top N list. 
The k-th element represents the probability of getting k", "raise Exception('No experiment error') top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N]) for", "Thus, the baseline performance vector is the probability mass function of a hypergeometric", "count = Counter(top_oot_nums) for k in range(length): res[k] = count[k] / num_expr return", "self.n = ntemp if n is None else n else: # Check validity", "raise Exception('Cannot pick negative number of posts in top list') self.result = result", "= Counter(top_oot_nums) for k in range(length): res[k] = count[k] / num_expr return res", "- self.M + self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the maximum support value", "subresult[:self.N]) for subresult in self.result] length = self.max_sup - self.min_sup + 1 res", "X. X is a hypergeometric random variable associated with this event. \"\"\" return", "OOT posts in the top N list. The k-th element represents the probability", "Exception('M should never be less than n') self.M, self.n = M, n def", "return sum(is_oot for _, is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult", "n: raise Exception('M should never be less than n') self.M, self.n = M,", "maximum support value of random variable X. X is a hypergeometric random variable", "TopListEvaluator: \"\"\"Evaluate performance of OOT detector based on ranked result list.\"\"\" def __init__(self,", "top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N]) for subresult in self.result] length", "k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation result", "the number of OOT posts in the top N list. 
The k-th element", "num_expr = len(self.result) if num_expr == 0: raise Exception('No experiment error') top_oot_nums =", "self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation", "import Counter import numpy as np from scipy.stats import hypergeom from otdet.util import", "\"\"\"Return the evaluation result in a performance vector.\"\"\" num_expr = len(self.result) if num_expr", "\"\"\" rv = hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty", "for _, is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult in self.result]", "sum(is_oot for _, is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult in", "n') self.M, self.n = M, n def _get_nums(self): \"\"\"Get the number of all", "posts in top list') self.result = result self.N = N # of posts", "the baseline performance vector is the probability mass function of a hypergeometric random", "in top list') self.result = result self.N = N # of posts taken", "import hypergeom from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT detector", "mismatch') if len(num_post) == 0 or len(num_oot) == 0: return 0, 0 else:", "the baseline performance vector. The baseline is obtaining OOT posts by chance. Thus,", "num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum support value of random variable X.", "random variable denoting the number of OOT posts in the top N list.", "The baseline is obtaining OOT posts by chance. Thus, the baseline performance vector", "def get_num_oot(subresult): return sum(is_oot for _, is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult))", "self.M + self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the maximum support value of", "N list. 
The k-th element represents the probability of getting k OOT posts", "result, M=None, n=None, N=1): if N < 0: raise Exception('Cannot pick negative number", "< n: raise Exception('M should never be less than n') self.M, self.n =", "the maximum support value of random variable X. X is a hypergeometric random", "0 else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum support value", "subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup = zip(*temp)", "import numpy as np from scipy.stats import hypergeom from otdet.util import lazyproperty class", "and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _, is_oot in subresult) temp", "num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1", "+ self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the maximum support value of random", "self.max_sup - self.min_sup + 1 res = np.zeros(length) count = Counter(top_oot_nums) for k", "negative number of posts in top list') self.result = result self.N = N", "= [(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot", "OOT posts in the top N list. \"\"\" rv = hypergeom(self.M, self.n, self.N)", "< 0: raise Exception('Cannot pick negative number of posts in top list') self.result", "in subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup =", "= [sum(is_oot for _, is_oot in subresult[:self.N]) for subresult in self.result] length =", "based on ranked result list.\"\"\" def __init__(self, result, M=None, n=None, N=1): if N", "obtaining OOT posts by chance. 
Thus, the baseline performance vector is the probability", "_, is_oot in subresult[:self.N]) for subresult in self.result] length = self.max_sup - self.min_sup", "N=1): if N < 0: raise Exception('Cannot pick negative number of posts in", "M, n def _get_nums(self): \"\"\"Get the number of all and OOT posts.\"\"\" def", "= result self.N = N # of posts taken (top N list) if", "M=None, n=None, N=1): if N < 0: raise Exception('Cannot pick negative number of", "= self._get_nums() self.M = Mtemp if M is None else M self.n =", "1 res = np.zeros(length) count = Counter(top_oot_nums) for k in range(length): res[k] =", "number of OOT posts in the top N list. The k-th element represents", "self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the maximum support value of random variable", "self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline performance vector. The baseline is obtaining", "n else: # Check validity of M and n if M < n:", "[sum(is_oot for _, is_oot in subresult[:self.N]) for subresult in self.result] length = self.max_sup", "# of posts taken (top N list) if M is None or n", "should never be less than n') self.M, self.n = M, n def _get_nums(self):", "OOT posts mismatch') if len(num_post) == 0 or len(num_oot) == 0: return 0,", "Exception('No experiment error') top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N]) for subresult", "min_sup(self): \"\"\"Return the minimum support value of random variable X. X is a", "associated with this event. \"\"\" return max(self.N - self.M + self.n, 0) @lazyproperty", "top list') self.result = result self.N = N # of posts taken (top", "== 0: raise Exception('No experiment error') top_oot_nums = [sum(is_oot for _, is_oot in", "detection evaluation methods. 
\"\"\" from collections import Counter import numpy as np from", "return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum support value of random", "1: raise Exception('Number of posts or OOT posts mismatch') if len(num_post) == 0", "_get_nums(self): \"\"\"Get the number of all and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot", "\"\"\" return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline performance vector. The", "zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot) >", "OOT posts by chance. Thus, the baseline performance vector is the probability mass", "baseline performance vector is the probability mass function of a hypergeometric random variable", "the top N list. \"\"\" rv = hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup,", "= self.max_sup - self.min_sup + 1 res = np.zeros(length) count = Counter(top_oot_nums) for", "posts by chance. Thus, the baseline performance vector is the probability mass function", "get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)),", "variable X. X is a hypergeometric random variable associated with this event. 
\"\"\"", "len(num_oot) > 1: raise Exception('Number of posts or OOT posts mismatch') if len(num_post)", "the number of all and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _,", "@lazyproperty def performance(self): \"\"\"Return the evaluation result in a performance vector.\"\"\" num_expr =", "len(num_post) == 0 or len(num_oot) == 0: return 0, 0 else: return num_post[0],", "= hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self):", "for subresult in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup))", "None or n is None: Mtemp, ntemp = self._get_nums() self.M = Mtemp if", "Exception('Number of posts or OOT posts mismatch') if len(num_post) == 0 or len(num_oot)", "than n') self.M, self.n = M, n def _get_nums(self): \"\"\"Get the number of", "self.M, self.n = M, n def _get_nums(self): \"\"\"Get the number of all and", "chance. 
Thus, the baseline performance vector is the probability mass function of a", "hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return", "len(num_post) > 1 or len(num_oot) > 1: raise Exception('Number of posts or OOT", "class TopListEvaluator: \"\"\"Evaluate performance of OOT detector based on ranked result list.\"\"\" def", "0 or len(num_oot) == 0: return 0, 0 else: return num_post[0], num_oot[0] @lazyproperty", "less than n') self.M, self.n = M, n def _get_nums(self): \"\"\"Get the number", "self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) >", "max(self.N - self.M + self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the maximum support", "as np from scipy.stats import hypergeom from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate", "n if M < n: raise Exception('M should never be less than n')", "self.M = Mtemp if M is None else M self.n = ntemp if", "vector is the probability mass function of a hypergeometric random variable denoting the", "k OOT posts in the top N list. \"\"\" rv = hypergeom(self.M, self.n,", "+ 1 res = np.zeros(length) count = Counter(top_oot_nums) for k in range(length): res[k]", "self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation result in a performance", "np from scipy.stats import hypergeom from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance", "num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot) > 1:", "vector. The baseline is obtaining OOT posts by chance. 
Thus, the baseline performance", "never be less than n') self.M, self.n = M, n def _get_nums(self): \"\"\"Get", "Exception('Cannot pick negative number of posts in top list') self.result = result self.N", "@lazyproperty def min_sup(self): \"\"\"Return the minimum support value of random variable X. X", "0) @lazyproperty def max_sup(self): \"\"\"Return the maximum support value of random variable X.", "\"\"\"Return the minimum support value of random variable X. X is a hypergeometric", "rv = hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def", "M is None or n is None: Mtemp, ntemp = self._get_nums() self.M =", "pick negative number of posts in top list') self.result = result self.N =", "variable associated with this event. \"\"\" return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return", "performance vector.\"\"\" num_expr = len(self.result) if num_expr == 0: raise Exception('No experiment error')", "taken (top N list) if M is None or n is None: Mtemp,", "from collections import Counter import numpy as np from scipy.stats import hypergeom from", "\"\"\" Out-of-topic post detection evaluation methods. \"\"\" from collections import Counter import numpy", "if num_expr == 0: raise Exception('No experiment error') top_oot_nums = [sum(is_oot for _,", "performance vector. The baseline is obtaining OOT posts by chance. 
Thus, the baseline", "the probability mass function of a hypergeometric random variable denoting the number of", "return max(self.N - self.M + self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the maximum", "ntemp if n is None else n else: # Check validity of M", "self.N = N # of posts taken (top N list) if M is", "validity of M and n if M < n: raise Exception('M should never", "or OOT posts mismatch') if len(num_post) == 0 or len(num_oot) == 0: return", "np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation result in a", "len(num_oot) == 0: return 0, 0 else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self):", "temp = [(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post,", "return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation result in a performance vector.\"\"\"", "len(self.result) if num_expr == 0: raise Exception('No experiment error') top_oot_nums = [sum(is_oot for", "if M is None else M self.n = ntemp if n is None", "support value of random variable X. X is a hypergeometric random variable associated", "subresult in self.result] length = self.max_sup - self.min_sup + 1 res = np.zeros(length)", "else: # Check validity of M and n if M < n: raise", "event. \"\"\" return max(self.N - self.M + self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return", "k-th element represents the probability of getting k OOT posts in the top", "is None else n else: # Check validity of M and n if", "num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or", "in the top N list. 
\"\"\" rv = hypergeom(self.M, self.n, self.N) k =", "= M, n def _get_nums(self): \"\"\"Get the number of all and OOT posts.\"\"\"", "and n if M < n: raise Exception('M should never be less than", "evaluation result in a performance vector.\"\"\" num_expr = len(self.result) if num_expr == 0:", "OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _, is_oot in subresult) temp =", "of all and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _, is_oot in", "is the probability mass function of a hypergeometric random variable denoting the number", "posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _, is_oot in subresult) temp = [(len(subresult),", "with this event. \"\"\" return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline", "result in a performance vector.\"\"\" num_expr = len(self.result) if num_expr == 0: raise", "experiment error') top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N]) for subresult in", "posts mismatch') if len(num_post) == 0 or len(num_oot) == 0: return 0, 0", "associated with this event. \"\"\" return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the", "The k-th element represents the probability of getting k OOT posts in the", "is a hypergeometric random variable associated with this event. \"\"\" return max(self.N -", "def max_sup(self): \"\"\"Return the maximum support value of random variable X. X is", "@lazyproperty def baseline(self): \"\"\"Return the baseline performance vector. The baseline is obtaining OOT", "in self.result] length = self.max_sup - self.min_sup + 1 res = np.zeros(length) count", "mass function of a hypergeometric random variable denoting the number of OOT posts", "of a hypergeometric random variable denoting the number of OOT posts in the", "raise Exception('M should never be less than n') self.M, self.n = M, n", "value of random variable X. 
X is a hypergeometric random variable associated with", "in subresult[:self.N]) for subresult in self.result] length = self.max_sup - self.min_sup + 1", "max_sup(self): \"\"\"Return the maximum support value of random variable X. X is a", "result self.N = N # of posts taken (top N list) if M", "X is a hypergeometric random variable associated with this event. \"\"\" return min(self.N,", "N list) if M is None or n is None: Mtemp, ntemp =", "or len(num_oot) > 1: raise Exception('Number of posts or OOT posts mismatch') if", "= Mtemp if M is None else M self.n = ntemp if n", "in the top N list. The k-th element represents the probability of getting", "of posts taken (top N list) if M is None or n is", "N list. \"\"\" rv = hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return", "0, 0 else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum support", "self._get_nums() self.M = Mtemp if M is None else M self.n = ntemp", "return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline performance vector. The baseline", "error') top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N]) for subresult in self.result]", "with this event. \"\"\" return max(self.N - self.M + self.n, 0) @lazyproperty def", "posts in the top N list. The k-th element represents the probability of", "0: return 0, 0 else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the", "== 0: return 0, 0 else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return", "random variable X. X is a hypergeometric random variable associated with this event.", "np.zeros(length) count = Counter(top_oot_nums) for k in range(length): res[k] = count[k] / num_expr", "probability of getting k OOT posts in the top N list. 
\"\"\" rv", "OOT detector based on ranked result list.\"\"\" def __init__(self, result, M=None, n=None, N=1):", "def __init__(self, result, M=None, n=None, N=1): if N < 0: raise Exception('Cannot pick", "by chance. Thus, the baseline performance vector is the probability mass function of", "= ntemp if n is None else n else: # Check validity of", "scipy.stats import hypergeom from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT", "\"\"\" return max(self.N - self.M + self.n, 0) @lazyproperty def max_sup(self): \"\"\"Return the", "of getting k OOT posts in the top N list. \"\"\" rv =", "> 1: raise Exception('Number of posts or OOT posts mismatch') if len(num_post) ==", "is None: Mtemp, ntemp = self._get_nums() self.M = Mtemp if M is None", "baseline is obtaining OOT posts by chance. Thus, the baseline performance vector is", "number of all and OOT posts.\"\"\" def get_num_oot(subresult): return sum(is_oot for _, is_oot", "num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum support value of random variable", "__init__(self, result, M=None, n=None, N=1): if N < 0: raise Exception('Cannot pick negative", "or len(num_oot) == 0: return 0, 0 else: return num_post[0], num_oot[0] @lazyproperty def", "= np.zeros(length) count = Counter(top_oot_nums) for k in range(length): res[k] = count[k] /", "this event. \"\"\" return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline performance", "lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT detector based on ranked result list.\"\"\"", "list. 
\"\"\" rv = hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k)", "list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot) > 1: raise Exception('Number of posts", "list.\"\"\" def __init__(self, result, M=None, n=None, N=1): if N < 0: raise Exception('Cannot", "represents the probability of getting k OOT posts in the top N list.", "rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation result in a performance vector.\"\"\" num_expr", "Counter import numpy as np from scipy.stats import hypergeom from otdet.util import lazyproperty", "a hypergeometric random variable associated with this event. \"\"\" return max(self.N - self.M", "else M self.n = ntemp if n is None else n else: #", "def performance(self): \"\"\"Return the evaluation result in a performance vector.\"\"\" num_expr = len(self.result)", "[(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot =", "def baseline(self): \"\"\"Return the baseline performance vector. The baseline is obtaining OOT posts", "ranked result list.\"\"\" def __init__(self, result, M=None, n=None, N=1): if N < 0:", "detector based on ranked result list.\"\"\" def __init__(self, result, M=None, n=None, N=1): if", "N # of posts taken (top N list) if M is None or", "post detection evaluation methods. \"\"\" from collections import Counter import numpy as np", "0: raise Exception('No experiment error') top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N])", "random variable associated with this event. \"\"\" return max(self.N - self.M + self.n,", "be less than n') self.M, self.n = M, n def _get_nums(self): \"\"\"Get the", "the minimum support value of random variable X. 
X is a hypergeometric random", "n def _get_nums(self): \"\"\"Get the number of all and OOT posts.\"\"\" def get_num_oot(subresult):", "raise Exception('Number of posts or OOT posts mismatch') if len(num_post) == 0 or", "number of posts in top list') self.result = result self.N = N #", "<filename>otdet/evaluation.py \"\"\" Out-of-topic post detection evaluation methods. \"\"\" from collections import Counter import", "hypergeom from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT detector based", "a hypergeometric random variable associated with this event. \"\"\" return min(self.N, self.n) @lazyproperty", "def min_sup(self): \"\"\"Return the minimum support value of random variable X. X is", "return 0, 0 else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum", "= list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot) > 1: raise Exception('Number", "None else M self.n = ntemp if n is None else n else:", "otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT detector based on ranked", "n is None else n else: # Check validity of M and n", "result list.\"\"\" def __init__(self, result, M=None, n=None, N=1): if N < 0: raise", "is_oot in subresult[:self.N]) for subresult in self.result] length = self.max_sup - self.min_sup +", "is obtaining OOT posts by chance. Thus, the baseline performance vector is the", "from scipy.stats import hypergeom from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of", "else: return num_post[0], num_oot[0] @lazyproperty def min_sup(self): \"\"\"Return the minimum support value of", "== 0 or len(num_oot) == 0: return 0, 0 else: return num_post[0], num_oot[0]", "variable associated with this event. 
\"\"\" return max(self.N - self.M + self.n, 0)", "if len(num_post) == 0 or len(num_oot) == 0: return 0, 0 else: return", "min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline performance vector. The baseline is", "None else n else: # Check validity of M and n if M", "event. \"\"\" return min(self.N, self.n) @lazyproperty def baseline(self): \"\"\"Return the baseline performance vector.", "= len(self.result) if num_expr == 0: raise Exception('No experiment error') top_oot_nums = [sum(is_oot", "1 or len(num_oot) > 1: raise Exception('Number of posts or OOT posts mismatch')", "minimum support value of random variable X. X is a hypergeometric random variable", "of random variable X. X is a hypergeometric random variable associated with this", "list) if M is None or n is None: Mtemp, ntemp = self._get_nums()", "for _, is_oot in subresult[:self.N]) for subresult in self.result] length = self.max_sup -", "self.min_sup + 1 res = np.zeros(length) count = Counter(top_oot_nums) for k in range(length):", "hypergeometric random variable associated with this event. \"\"\" return max(self.N - self.M +", "= N # of posts taken (top N list) if M is None", "is None or n is None: Mtemp, ntemp = self._get_nums() self.M = Mtemp", "baseline(self): \"\"\"Return the baseline performance vector. The baseline is obtaining OOT posts by", "\"\"\"Return the maximum support value of random variable X. X is a hypergeometric", "None: Mtemp, ntemp = self._get_nums() self.M = Mtemp if M is None else", "_, is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup,", "top N list. 
\"\"\" rv = hypergeom(self.M, self.n, self.N) k = np.arange(self.min_sup, self.max_sup+1)", "performance(self): \"\"\"Return the evaluation result in a performance vector.\"\"\" num_expr = len(self.result) if", "is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult)) for subresult in self.result] num_post_tup, num_oot_tup", "in self.result] num_post_tup, num_oot_tup = zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post)", "@lazyproperty def max_sup(self): \"\"\"Return the maximum support value of random variable X. X", "\"\"\"Return the baseline performance vector. The baseline is obtaining OOT posts by chance.", "posts in the top N list. \"\"\" rv = hypergeom(self.M, self.n, self.N) k", "evaluation methods. \"\"\" from collections import Counter import numpy as np from scipy.stats", "the evaluation result in a performance vector.\"\"\" num_expr = len(self.result) if num_expr ==", "function of a hypergeometric random variable denoting the number of OOT posts in", "list. The k-th element represents the probability of getting k OOT posts in", "if N < 0: raise Exception('Cannot pick negative number of posts in top", "= np.arange(self.min_sup, self.max_sup+1) return rv.pmf(k) @lazyproperty def performance(self): \"\"\"Return the evaluation result in", "\"\"\" from collections import Counter import numpy as np from scipy.stats import hypergeom", "in a performance vector.\"\"\" num_expr = len(self.result) if num_expr == 0: raise Exception('No", "of posts in top list') self.result = result self.N = N # of", "self.result] length = self.max_sup - self.min_sup + 1 res = np.zeros(length) count =", "= zip(*temp) num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot)", "a performance vector.\"\"\" num_expr = len(self.result) if num_expr == 0: raise Exception('No experiment", "of OOT posts in the top N list. 
The k-th element represents the", "# Check validity of M and n if M < n: raise Exception('M", "if n is None else n else: # Check validity of M and", "self.n = M, n def _get_nums(self): \"\"\"Get the number of all and OOT", "denoting the number of OOT posts in the top N list. The k-th", "hypergeometric random variable associated with this event. \"\"\" return min(self.N, self.n) @lazyproperty def", "collections import Counter import numpy as np from scipy.stats import hypergeom from otdet.util", "performance vector is the probability mass function of a hypergeometric random variable denoting", "Mtemp, ntemp = self._get_nums() self.M = Mtemp if M is None else M", "length = self.max_sup - self.min_sup + 1 res = np.zeros(length) count = Counter(top_oot_nums)", "X is a hypergeometric random variable associated with this event. \"\"\" return max(self.N", "list(set(num_post_tup)), list(set(num_oot_tup)) if len(num_post) > 1 or len(num_oot) > 1: raise Exception('Number of", "M and n if M < n: raise Exception('M should never be less", "res = np.zeros(length) count = Counter(top_oot_nums) for k in range(length): res[k] = count[k]", "for subresult in self.result] length = self.max_sup - self.min_sup + 1 res =", "import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT detector based on ranked result", "num_expr == 0: raise Exception('No experiment error') top_oot_nums = [sum(is_oot for _, is_oot", "variable denoting the number of OOT posts in the top N list. The", "posts taken (top N list) if M is None or n is None:", "element represents the probability of getting k OOT posts in the top N", "performance of OOT detector based on ranked result list.\"\"\" def __init__(self, result, M=None,", "this event. 
\"\"\" return max(self.N - self.M + self.n, 0) @lazyproperty def max_sup(self):", "or n is None: Mtemp, ntemp = self._get_nums() self.M = Mtemp if M", "Mtemp if M is None else M self.n = ntemp if n is", "n is None: Mtemp, ntemp = self._get_nums() self.M = Mtemp if M is", "from otdet.util import lazyproperty class TopListEvaluator: \"\"\"Evaluate performance of OOT detector based on", "on ranked result list.\"\"\" def __init__(self, result, M=None, n=None, N=1): if N <", "get_num_oot(subresult): return sum(is_oot for _, is_oot in subresult) temp = [(len(subresult), get_num_oot(subresult)) for", "0: raise Exception('Cannot pick negative number of posts in top list') self.result =" ]
[ "as np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if", "abs(rtol)), desired + abs(atol)], axis=0) # Test for ISO 532-1 comformance (section 5.1)", "str Label for the tolerance curves xaxis : array_like, optional x axis for", "of ISO 532-1. It compares ``actual`` to ``desired +/- min(atol, rtol * abs(desired))``.", "optional To generate a \"compliance\" plot tol_label: str Label for the tolerance curves", "plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\",", "To generate a \"compliance\" plot tol_label: str Label for the tolerance curves xaxis", "coding: utf-8 -*- try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError( \"In", "desired - abs(atol)], axis=0) range_neg = np.amax( [desired * (1 + abs(rtol)), desired", "import numpy as np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\"", "desired. rtol : float, optional Relative tolerance. atol : float, optional Absolute tolerance.", "utf-8 -*- try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError( \"In order", "numpy as np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check", "# Define xaxis if xaxis is None: xaxis = np.arange(actual.shape[0]) # Plot desired", "for ISO 532-1 comformance (section 5.1) is_isoclose = (actual >= range_pos).all() and (actual", "from section 5.1 of ISO 532-1. It compares ``actual`` to ``desired +/- min(atol,", "np.amax( [desired * (1 + abs(rtol)), desired + abs(atol)], axis=0) # Test for", "The test is inspired from section 5.1 of ISO 532-1. 
It compares ``actual``", "raise RuntimeError( \"In order to perform this validation you need the 'matplotlib' package.\"", "is_isoclose = (actual >= range_pos).all() and (actual <= range_neg).all() if is_plot: # Define", "'matplotlib' package.\" ) import numpy as np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False,", "perform this validation you need the 'matplotlib' package.\" ) import numpy as np", "import matplotlib.pyplot as plt except ImportError: raise RuntimeError( \"In order to perform this", "the tolerance curves xaxis : array_like, optional x axis for the \"compliance\" plot", "\"compliance\" plot Output ------ is_isoclose: bool False if actual and desired are not", "# label=\"\", linewidth=1, ) # Plot actual value plt.plot(xaxis, actual) plt.legend() return is_isoclose", "to desired tolerance. The test is inspired from section 5.1 of ISO 532-1.", "[desired * (1 + abs(rtol)), desired + abs(atol)], axis=0) # Test for ISO", "are not equal up to specified tolerance. \"\"\" # Tolerances range_pos = np.amin(", "ISO 532-1. It compares ``actual`` to ``desired +/- min(atol, rtol * abs(desired))``. Parameters", "axis for the \"compliance\" plot Output ------ is_isoclose: bool False if actual and", "x axis for the \"compliance\" plot Output ------ is_isoclose: bool False if actual", "rtol * abs(desired))``. Parameters ---------- actual : array_like Array obtained. 
desired : array_like", "except ImportError: raise RuntimeError( \"In order to perform this validation you need the", "= np.amax( [desired * (1 + abs(rtol)), desired + abs(atol)], axis=0) # Test", "def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two arrays", "-*- coding: utf-8 -*- try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError(", "range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\",", "you need the 'matplotlib' package.\" ) import numpy as np def isoclose(actual, desired,", "= np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1,", "optional Absolute tolerance. is_plot : bool, optional To generate a \"compliance\" plot tol_label:", "up to specified tolerance. \"\"\" # Tolerances range_pos = np.amin( [desired * (1", "(1 + abs(rtol)), desired + abs(atol)], axis=0) # Test for ISO 532-1 comformance", "desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos,", "range_neg = np.amax( [desired * (1 + abs(rtol)), desired + abs(atol)], axis=0) #", "array_like Array obtained. desired : array_like Array desired. rtol : float, optional Relative", "bool False if actual and desired are not equal up to specified tolerance.", "``desired +/- min(atol, rtol * abs(desired))``. Parameters ---------- actual : array_like Array obtained.", "desired : array_like Array desired. rtol : float, optional Relative tolerance. atol :", "* (1 - abs(rtol)), desired - abs(atol)], axis=0) range_neg = np.amax( [desired *", "np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two", "rtol : float, optional Relative tolerance. atol : float, optional Absolute tolerance. 
is_plot", ": array_like, optional x axis for the \"compliance\" plot Output ------ is_isoclose: bool", "isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two arrays are", "\"compliance\" plot tol_label: str Label for the tolerance curves xaxis : array_like, optional", "tol_label=None, xaxis=None): \"\"\" Check if two arrays are equal up to desired tolerance.", "for the \"compliance\" plot Output ------ is_isoclose: bool False if actual and desired", "curves xaxis : array_like, optional x axis for the \"compliance\" plot Output ------", "xaxis : array_like, optional x axis for the \"compliance\" plot Output ------ is_isoclose:", "tolerance. The test is inspired from section 5.1 of ISO 532-1. It compares", "optional x axis for the \"compliance\" plot Output ------ is_isoclose: bool False if", "color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1,", "color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot actual value plt.plot(xaxis, actual) plt.legend()", "plot Output ------ is_isoclose: bool False if actual and desired are not equal", "atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two arrays are equal up to", "rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two arrays are equal up", "range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\",", ": float, optional Absolute tolerance. is_plot : bool, optional To generate a \"compliance\"", "to specified tolerance. \"\"\" # Tolerances range_pos = np.amin( [desired * (1 -", "xaxis if xaxis is None: xaxis = np.arange(actual.shape[0]) # Plot desired range plt.plot(", "\"\"\" Check if two arrays are equal up to desired tolerance. 
The test", "------ is_isoclose: bool False if actual and desired are not equal up to", "False if actual and desired are not equal up to specified tolerance. \"\"\"", "validation you need the 'matplotlib' package.\" ) import numpy as np def isoclose(actual,", "arrays are equal up to desired tolerance. The test is inspired from section", "np.amin( [desired * (1 - abs(rtol)), desired - abs(atol)], axis=0) range_neg = np.amax(", "axis=0) range_neg = np.amax( [desired * (1 + abs(rtol)), desired + abs(atol)], axis=0)", "Check if two arrays are equal up to desired tolerance. The test is", "(1 - abs(rtol)), desired - abs(atol)], axis=0) range_neg = np.amax( [desired * (1", "actual : array_like Array obtained. desired : array_like Array desired. rtol : float,", "Absolute tolerance. is_plot : bool, optional To generate a \"compliance\" plot tol_label: str", "desired + abs(atol)], axis=0) # Test for ISO 532-1 comformance (section 5.1) is_isoclose", "for the tolerance curves xaxis : array_like, optional x axis for the \"compliance\"", "<= range_neg).all() if is_plot: # Define xaxis if xaxis is None: xaxis =", "comformance (section 5.1) is_isoclose = (actual >= range_pos).all() and (actual <= range_neg).all() if", ": array_like Array obtained. desired : array_like Array desired. rtol : float, optional", "+/- min(atol, rtol * abs(desired))``. Parameters ---------- actual : array_like Array obtained. desired", ") import numpy as np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None):", "Output ------ is_isoclose: bool False if actual and desired are not equal up", ": float, optional Relative tolerance. atol : float, optional Absolute tolerance. is_plot :", "Relative tolerance. atol : float, optional Absolute tolerance. is_plot : bool, optional To", "matplotlib.pyplot as plt except ImportError: raise RuntimeError( \"In order to perform this validation", "if actual and desired are not equal up to specified tolerance. 
\"\"\" #", "label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) #", "compares ``actual`` to ``desired +/- min(atol, rtol * abs(desired))``. Parameters ---------- actual :", "array_like Array desired. rtol : float, optional Relative tolerance. atol : float, optional", "and (actual <= range_neg).all() if is_plot: # Define xaxis if xaxis is None:", "two arrays are equal up to desired tolerance. The test is inspired from", "the \"compliance\" plot Output ------ is_isoclose: bool False if actual and desired are", "(actual >= range_pos).all() and (actual <= range_neg).all() if is_plot: # Define xaxis if", ") plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot actual", "Array obtained. desired : array_like Array desired. rtol : float, optional Relative tolerance.", "# Test for ISO 532-1 comformance (section 5.1) is_isoclose = (actual >= range_pos).all()", "xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot actual value plt.plot(xaxis,", "up to desired tolerance. The test is inspired from section 5.1 of ISO", "abs(atol)], axis=0) # Test for ISO 532-1 comformance (section 5.1) is_isoclose = (actual", ": bool, optional To generate a \"compliance\" plot tol_label: str Label for the", "linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, )", "Array desired. rtol : float, optional Relative tolerance. atol : float, optional Absolute", "* abs(desired))``. Parameters ---------- actual : array_like Array obtained. desired : array_like Array", "- abs(atol)], axis=0) range_neg = np.amax( [desired * (1 + abs(rtol)), desired +", "None: xaxis = np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\",", "tolerance. atol : float, optional Absolute tolerance. 
is_plot : bool, optional To generate", ">= range_pos).all() and (actual <= range_neg).all() if is_plot: # Define xaxis if xaxis", "tolerance. is_plot : bool, optional To generate a \"compliance\" plot tol_label: str Label", "is_plot : bool, optional To generate a \"compliance\" plot tol_label: str Label for", "= (actual >= range_pos).all() and (actual <= range_neg).all() if is_plot: # Define xaxis", "need the 'matplotlib' package.\" ) import numpy as np def isoclose(actual, desired, rtol=1e-7,", "is_plot: # Define xaxis if xaxis is None: xaxis = np.arange(actual.shape[0]) # Plot", "# Tolerances range_pos = np.amin( [desired * (1 - abs(rtol)), desired - abs(atol)],", "generate a \"compliance\" plot tol_label: str Label for the tolerance curves xaxis :", "Tolerances range_pos = np.amin( [desired * (1 - abs(rtol)), desired - abs(atol)], axis=0)", "\"In order to perform this validation you need the 'matplotlib' package.\" ) import", "RuntimeError( \"In order to perform this validation you need the 'matplotlib' package.\" )", "range_neg).all() if is_plot: # Define xaxis if xaxis is None: xaxis = np.arange(actual.shape[0])", "as plt except ImportError: raise RuntimeError( \"In order to perform this validation you", "atol : float, optional Absolute tolerance. is_plot : bool, optional To generate a", "---------- actual : array_like Array obtained. desired : array_like Array desired. rtol :", "(section 5.1) is_isoclose = (actual >= range_pos).all() and (actual <= range_neg).all() if is_plot:", "if two arrays are equal up to desired tolerance. The test is inspired", "Parameters ---------- actual : array_like Array obtained. desired : array_like Array desired. rtol", "xaxis=None): \"\"\" Check if two arrays are equal up to desired tolerance. The", "abs(atol)], axis=0) range_neg = np.amax( [desired * (1 + abs(rtol)), desired + abs(atol)],", "optional Relative tolerance. atol : float, optional Absolute tolerance. 
is_plot : bool, optional", "``actual`` to ``desired +/- min(atol, rtol * abs(desired))``. Parameters ---------- actual : array_like", "the 'matplotlib' package.\" ) import numpy as np def isoclose(actual, desired, rtol=1e-7, atol=0,", "532-1. It compares ``actual`` to ``desired +/- min(atol, rtol * abs(desired))``. Parameters ----------", "min(atol, rtol * abs(desired))``. Parameters ---------- actual : array_like Array obtained. desired :", "Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis,", "and desired are not equal up to specified tolerance. \"\"\" # Tolerances range_pos", "+ abs(atol)], axis=0) # Test for ISO 532-1 comformance (section 5.1) is_isoclose =", "tol_label: str Label for the tolerance curves xaxis : array_like, optional x axis", "desired tolerance. The test is inspired from section 5.1 of ISO 532-1. It", "5.1 of ISO 532-1. It compares ``actual`` to ``desired +/- min(atol, rtol *", "this validation you need the 'matplotlib' package.\" ) import numpy as np def", "float, optional Absolute tolerance. is_plot : bool, optional To generate a \"compliance\" plot", "inspired from section 5.1 of ISO 532-1. It compares ``actual`` to ``desired +/-", "axis=0) # Test for ISO 532-1 comformance (section 5.1) is_isoclose = (actual >=", "plot tol_label: str Label for the tolerance curves xaxis : array_like, optional x", "are equal up to desired tolerance. 
The test is inspired from section 5.1", "\"\"\" # Tolerances range_pos = np.amin( [desired * (1 - abs(rtol)), desired -", "abs(rtol)), desired - abs(atol)], axis=0) range_neg = np.amax( [desired * (1 + abs(rtol)),", "xaxis is None: xaxis = np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis, range_neg,", "array_like, optional x axis for the \"compliance\" plot Output ------ is_isoclose: bool False", "5.1) is_isoclose = (actual >= range_pos).all() and (actual <= range_neg).all() if is_plot: #", "a \"compliance\" plot tol_label: str Label for the tolerance curves xaxis : array_like,", "# -*- coding: utf-8 -*- try: import matplotlib.pyplot as plt except ImportError: raise", "tolerance curves xaxis : array_like, optional x axis for the \"compliance\" plot Output", "abs(desired))``. Parameters ---------- actual : array_like Array obtained. desired : array_like Array desired.", "range_pos = np.amin( [desired * (1 - abs(rtol)), desired - abs(atol)], axis=0) range_neg", "range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot actual value plt.plot(xaxis, actual)", "<gh_stars>1-10 # -*- coding: utf-8 -*- try: import matplotlib.pyplot as plt except ImportError:", "is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two arrays are equal up to desired", "- abs(rtol)), desired - abs(atol)], axis=0) range_neg = np.amax( [desired * (1 +", "It compares ``actual`` to ``desired +/- min(atol, rtol * abs(desired))``. Parameters ---------- actual", "532-1 comformance (section 5.1) is_isoclose = (actual >= range_pos).all() and (actual <= range_neg).all()", "Label for the tolerance curves xaxis : array_like, optional x axis for the", "section 5.1 of ISO 532-1. It compares ``actual`` to ``desired +/- min(atol, rtol", "to ``desired +/- min(atol, rtol * abs(desired))``. 
Parameters ---------- actual : array_like Array", "(actual <= range_neg).all() if is_plot: # Define xaxis if xaxis is None: xaxis", "[desired * (1 - abs(rtol)), desired - abs(atol)], axis=0) range_neg = np.amax( [desired", "if xaxis is None: xaxis = np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis,", "desired are not equal up to specified tolerance. \"\"\" # Tolerances range_pos =", "if is_plot: # Define xaxis if xaxis is None: xaxis = np.arange(actual.shape[0]) #", "* (1 + abs(rtol)), desired + abs(atol)], axis=0) # Test for ISO 532-1", "actual and desired are not equal up to specified tolerance. \"\"\" # Tolerances", "desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None): \"\"\" Check if two arrays are equal", "+ abs(rtol)), desired + abs(atol)], axis=0) # Test for ISO 532-1 comformance (section", "plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot actual value", "linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot", "Define xaxis if xaxis is None: xaxis = np.arange(actual.shape[0]) # Plot desired range", "is_isoclose: bool False if actual and desired are not equal up to specified", "specified tolerance. \"\"\" # Tolerances range_pos = np.amin( [desired * (1 - abs(rtol)),", "float, optional Relative tolerance. atol : float, optional Absolute tolerance. is_plot : bool,", "plt except ImportError: raise RuntimeError( \"In order to perform this validation you need", "obtained. desired : array_like Array desired. rtol : float, optional Relative tolerance. atol", "is None: xaxis = np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\",", "is inspired from section 5.1 of ISO 532-1. 
It compares ``actual`` to ``desired", "xaxis = np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label,", "test is inspired from section 5.1 of ISO 532-1. It compares ``actual`` to", "equal up to desired tolerance. The test is inspired from section 5.1 of", "= np.amin( [desired * (1 - abs(rtol)), desired - abs(atol)], axis=0) range_neg =", "xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot( xaxis, range_pos, color=\"tab:red\", linestyle=\"solid\", #", "bool, optional To generate a \"compliance\" plot tol_label: str Label for the tolerance", "tolerance. \"\"\" # Tolerances range_pos = np.amin( [desired * (1 - abs(rtol)), desired", "-*- try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError( \"In order to", "np.arange(actual.shape[0]) # Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, )", "linestyle=\"solid\", # label=\"\", linewidth=1, ) # Plot actual value plt.plot(xaxis, actual) plt.legend() return", ": array_like Array desired. rtol : float, optional Relative tolerance. atol : float,", "# Plot desired range plt.plot( xaxis, range_neg, color=\"tab:red\", linestyle=\"solid\", label=tol_label, linewidth=1, ) plt.plot(", "ImportError: raise RuntimeError( \"In order to perform this validation you need the 'matplotlib'", "package.\" ) import numpy as np def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None,", "try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError( \"In order to perform", "Test for ISO 532-1 comformance (section 5.1) is_isoclose = (actual >= range_pos).all() and", "to perform this validation you need the 'matplotlib' package.\" ) import numpy as", "not equal up to specified tolerance. 
\"\"\" # Tolerances range_pos = np.amin( [desired", "range_pos).all() and (actual <= range_neg).all() if is_plot: # Define xaxis if xaxis is", "equal up to specified tolerance. \"\"\" # Tolerances range_pos = np.amin( [desired *", "ISO 532-1 comformance (section 5.1) is_isoclose = (actual >= range_pos).all() and (actual <=", "order to perform this validation you need the 'matplotlib' package.\" ) import numpy" ]
[ "(or set of pixels) to squeeze into one pixel in feature map, while", "the effectiveness of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64,", "and flips and zooms to the images # https://keras.io/api/preprocessing/image/ # rescale property is", "64), batch_size = 32, class_mode = 'binary') # [2] define the model, according", "to compare the effectiveness of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size", "we have a big dataset of pictures (250mb) with cats and dogs, locally", "(and run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam", "from a Keras example, click link above # the model will look at", "a big dataset of pictures (250mb) with cats and dogs, locally stored (in", "(64, 64), batch_size = 32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale = 1./255)", "images will be processed in batches (batch => extra dimension) test_image = image.img_to_array(test_image)", "strides is about shifting the frame of pixels to capture next pixels to", "image into a numpy array, then expand the array into an extra dimension", "zoom_range = 0.2, horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size =", "pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') test_datagen", "here we take all the pixels and flatten them into a vector that", "is 1, then dog; if 0, then cat; we know what index corresponds", "'binary') test_datagen = ImageDataGenerator(rescale = 1./255) # we scale but do not transform/augment", "need just one neuron for binary classification as output (0/1, or cat/dog) #", "to new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() # --", "more complex and we may get more 
accuracy # -- output layer model.add(layers.Dense(units=1,", "capture next pixels to observe # -- second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3,", "and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) # here", "= 'binary') # [2] define the model, according to new keras instructions #", "picture # ------ # -- load a specific image to observe after training", "if 0, then cat; we know what index corresponds to which class by", "convert the image into a numpy array, then expand the array into an", "from tensorflow.keras import layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator #", "= 32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale = 1./255) # we scale", "Keras example, click link above # the model will look at images in", "complex and we may get more accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid'))", "(batch => extra dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0)", "filters in convolution, kernel is the CNN feature detector square, input shape for", "may get more accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need", "first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or set of", "pixels to observe # -- second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2,", "= ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)", "activation='relu')) # neurons are high here because processing images is more complex and", "# -- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because processing images", 
"we apply shifts and rotations and flips and zooms to the images #", "and we may get more accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) #", "the pool (or set of pixels) to squeeze into one pixel in feature", "tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.preprocessing import image", "tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep learning -", "target_size = (64, 64)) # -- convert the image into a numpy array,", "shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN pics", "(64, 64), batch_size = 32, class_mode = 'binary') # [2] define the model,", "image to observe after training and predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size", "activation='relu', input_shape=[64, 64, 3])) # filters are output filters in convolution, kernel is", "model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because processing images is more complex", "= np.expand_dims(test_image, axis = 0) # -- make a prediction in terms of", "the originals to compare the effectiveness of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics", "to avoid over-fitting => we apply shifts and rotations and flips and zooms", "according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() #", "import keras from tensorflow.keras import layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import", "scaling while other properties below are from a Keras example, click link above", "pictures (250mb) with cats and dogs, locally stored (in folder 5 - Tech)", "metrics = ['accuracy']) # adam is a way to perform the stochastic gradient", 
"dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # -- convert the image into a numpy", "a first prediction around a single picture # ------ # -- load a", "# *** # we have a big dataset of pictures (250mb) with cats", "a numpy array, then expand the array into an extra dimension as images", "is about feature scaling while other properties below are from a Keras example,", "= image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # -- convert the image", "array into an extra dimension as images will be processed in batches (batch", "a Keras example, click link above # the model will look at images", "train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')", "and flatten them into a vector that keeps the dimensional charateristics of a", "- CNN for classification # *** # we have a big dataset of", "processing images is more complex and we may get more accuracy # --", "import numpy as np import pandas as pd import matplotlib.pyplot as plt import", "pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) # here we", "test_image = np.expand_dims(test_image, axis = 0) # -- make a prediction in terms", "just one neuron for binary classification as output (0/1, or cat/dog) # [3]", "# *** # [1] load and pre-process data # ------ # -- we", "adam is a way to perform the stochastic gradient descent model.fit(x = training_set,", "keras.Sequential() # -- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))", "= training_set, validation_data = test_set, epochs = 25) # print(model.summary()) # [4] try", "to which class by calling the attribute class_indices as below... 
print(training_set.class_indices) if result[0][0]", "need the originals to compare the effectiveness of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN", "pixels to capture next pixels to observe # -- second convolution and pooling", "as plt import tensorflow as tf from tensorflow import keras from tensorflow.keras import", "keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() # -- first convolution", "for classification # *** # we have a big dataset of pictures (250mb)", "attribute class_indices as below... print(training_set.class_indices) if result[0][0] == 1: prediction = 'dog' else:", "input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or set of pixels)", "we scale but do not transform/augment the testset images as we need the", "- Tech) # *** # [1] load and pre-process data # ------ #", "testset images as we need the originals to compare the effectiveness of our", "will look at images in batches as usual train_datagen = ImageDataGenerator(rescale = 1./255,", "of a picture # -- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high here", "set of pixels) to squeeze into one pixel in feature map, while strides", "['accuracy']) # adam is a way to perform the stochastic gradient descent model.fit(x", "first prediction around a single picture # ------ # -- load a specific", "with cats and dogs, locally stored (in folder 5 - Tech) # ***", "of either 0 or 1 result = cnn.predict(test_image) # -- decode: if prediction", "the image into a numpy array, then expand the array into an extra", "to the images # https://keras.io/api/preprocessing/image/ # rescale property is about feature scaling while", "numpy array, then expand the array into an extra dimension as images will", "index corresponds to which class by calling the attribute class_indices as below... 
print(training_set.class_indices)", "class by calling the attribute class_indices as below... print(training_set.class_indices) if result[0][0] == 1:", "# -- decode: if prediction is 1, then dog; if 0, then cat;", "image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # -- convert the image into", "big dataset of pictures (250mb) with cats and dogs, locally stored (in folder", "test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode", "augmentation/transformations here to avoid over-fitting => we apply shifts and rotations and flips", "ImageDataGenerator(rescale = 1./255) # we scale but do not transform/augment the testset images", "extra dimension as images will be processed in batches (batch => extra dimension)", "and rotations and flips and zooms to the images # https://keras.io/api/preprocessing/image/ # rescale", "the model, according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model =", "of pixels to capture next pixels to observe # -- second convolution and", "------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is a way to perform", "https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() # -- first convolution and pooling model.add(layers.Conv2D(filters=32,", "property is about feature scaling while other properties below are from a Keras", "= train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode =", "layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or set of pixels) to", "pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are output filters in", "# adam is a way to perform the stochastic gradient descent model.fit(x =", "to observe # -- second convolution and 
pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2))", "input_shape=[64, 64, 3])) # filters are output filters in convolution, kernel is the", "is more complex and we may get more accuracy # -- output layer", "model.fit(x = training_set, validation_data = test_set, epochs = 25) # print(model.summary()) # [4]", "frame of pixels to capture next pixels to observe # -- second convolution", "gradient descent model.fit(x = training_set, validation_data = test_set, epochs = 25) # print(model.summary())", "import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from", "# -- make a prediction in terms of either 0 or 1 result", "64)) # -- convert the image into a numpy array, then expand the", "properties below are from a Keras example, click link above # the model", "pd import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras", "the CNN feature detector square, input shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2))", "as output (0/1, or cat/dog) # [3] fit (and run/train) the model #", "# ------ # -- load a specific image to observe after training and", "next pixels to observe # -- second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu'))", "model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) # here we take all the pixels", "compare the effectiveness of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size =", "instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() # -- first convolution and", "observe # -- second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) #", "(0/1, or cat/dog) # [3] fit (and run/train) the model # ------ 
model.compile(optimizer='adam',", "while other properties below are from a Keras example, click link above #", "loss='binary_crossentropy', metrics = ['accuracy']) # adam is a way to perform the stochastic", "the pixels and flatten them into a vector that keeps the dimensional charateristics", "prediction is 1, then dog; if 0, then cat; we know what index", "# ------ model = keras.Sequential() # -- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3,", "the frame of pixels to capture next pixels to observe # -- second", "extra dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) # --", "into one pixel in feature map, while strides is about shifting the frame", "but do not transform/augment the testset images as we need the originals to", "-- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron for binary", "vector that keeps the dimensional charateristics of a picture # -- connect model.add(layers.Dense(units=128,", "# [3] fit (and run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics =", "rescale property is about feature scaling while other properties below are from a", "feature detector square, input shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size", "dimensional charateristics of a picture # -- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are", "# we need just one neuron for binary classification as output (0/1, or", "above # the model will look at images in batches as usual train_datagen", "CNN feature detector square, input shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) #", "example, click link above # the model will look at images in batches", "over-fitting => we apply shifts and rotations and flips and zooms to the", "flatten them into a vector that keeps the dimensional charateristics of a 
picture", "= True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size =", "# ------ # -- we apply image augmentation/transformations here to avoid over-fitting =>", "of pictures (250mb) with cats and dogs, locally stored (in folder 5 -", "model.add(layers.Flatten()) # here we take all the pixels and flatten them into a", "picture # -- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because processing", "'binary') # [2] define the model, according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn", "size of the pool (or set of pixels) to squeeze into one pixel", "[3] fit (and run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy'])", "training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32,", "model, according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential()", "training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode", "as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.preprocessing import", "from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.preprocessing import image from", "*** # we have a big dataset of pictures (250mb) with cats and", "load a specific image to observe after training and predict test_image = image.load_img('../data/CNN", "image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) # -- make a prediction in", "# print(model.summary()) # [4] try a first prediction around a single picture #", "horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), 
batch_size", "batches as usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range =", "= 0) # -- make a prediction in terms of either 0 or", "or 1 result = cnn.predict(test_image) # -- decode: if prediction is 1, then", "= test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode =", "# [4] try a first prediction around a single picture # ------ #", "------ # -- load a specific image to observe after training and predict", "scale but do not transform/augment the testset images as we need the originals", "kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) # here we take all", "target_size = (64, 64), batch_size = 32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale", "train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip =", "dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') # [2]", "*** # [1] load and pre-process data # ------ # -- we apply", "descent model.fit(x = training_set, validation_data = test_set, epochs = 25) # print(model.summary()) #", "the model will look at images in batches as usual train_datagen = ImageDataGenerator(rescale", "for binary classification as output (0/1, or cat/dog) # [3] fit (and run/train)", "(in folder 5 - Tech) # *** # [1] load and pre-process data", "to capture next pixels to observe # -- second convolution and pooling model.add(layers.Conv2D(filters=32,", "square, input shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the", "tensorflow.keras import layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator # ---------------------------", "the dimensional charateristics of a picture # -- connect model.add(layers.Dense(units=128, activation='relu')) # 
neurons", "images is more complex and we may get more accuracy # -- output", "model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is a way", "and pre-process data # ------ # -- we apply image augmentation/transformations here to", "here to avoid over-fitting => we apply shifts and rotations and flips and", "as below... print(training_set.class_indices) if result[0][0] == 1: prediction = 'dog' else: prediction =", "prediction in terms of either 0 or 1 result = cnn.predict(test_image) # --", "pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') #", "a way to perform the stochastic gradient descent model.fit(x = training_set, validation_data =", "-- make a prediction in terms of either 0 or 1 result =", "= (64, 64)) # -- convert the image into a numpy array, then", "folder 5 - Tech) # *** # [1] load and pre-process data #", "have a big dataset of pictures (250mb) with cats and dogs, locally stored", "look at images in batches as usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range", "after training and predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))", "around a single picture # ------ # -- load a specific image to", "64), batch_size = 32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale = 1./255) #", "-- decode: if prediction is 1, then dog; if 0, then cat; we", "layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron for binary classification as", "in convolution, kernel is the CNN feature detector square, input shape for first", "result = cnn.predict(test_image) # -- decode: if prediction is 1, then dog; if", "test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) # -- make a", "deep learning - CNN for classification # *** # we have a big", "dataset/training_set', target_size = (64, 64), batch_size 
= 32, class_mode = 'binary') test_datagen =", "to perform the stochastic gradient descent model.fit(x = training_set, validation_data = test_set, epochs", "not transform/augment the testset images as we need the originals to compare the", "# we scale but do not transform/augment the testset images as we need", "calling the attribute class_indices as below... print(training_set.class_indices) if result[0][0] == 1: prediction =", "detector square, input shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of", "take all the pixels and flatten them into a vector that keeps the", "in batches as usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range", "= image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) # -- make a prediction", "=> we apply shifts and rotations and flips and zooms to the images", "then cat; we know what index corresponds to which class by calling the", "output (0/1, or cat/dog) # [3] fit (and run/train) the model # ------", "then expand the array into an extra dimension as images will be processed", "activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) # here we take all the", "second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten())", "are output filters in convolution, kernel is the CNN feature detector square, input", "what index corresponds to which class by calling the attribute class_indices as below...", "high here because processing images is more complex and we may get more", "= 1./255) # we scale but do not transform/augment the testset images as", "activation='sigmoid')) # we need just one neuron for binary classification as output (0/1,", "link above # the model will look at images in batches as usual", "(250mb) with cats and dogs, locally stored (in 
folder 5 - Tech) #", "observe after training and predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64,", "the images # https://keras.io/api/preprocessing/image/ # rescale property is about feature scaling while other", "avoid over-fitting => we apply shifts and rotations and flips and zooms to", "are high here because processing images is more complex and we may get", "test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # -- convert the", "is about shifting the frame of pixels to capture next pixels to observe", "5 - Tech) # *** # [1] load and pre-process data # ------", "about feature scaling while other properties below are from a Keras example, click", "our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size =", "cat/dog) # [3] fit (and run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics", "= (64, 64), batch_size = 32, class_mode = 'binary') # [2] define the", "if prediction is 1, then dog; if 0, then cat; we know what", "validation_data = test_set, epochs = 25) # print(model.summary()) # [4] try a first", "locally stored (in folder 5 - Tech) # *** # [1] load and", "input shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool", "------ # -- we apply image augmentation/transformations here to avoid over-fitting => we", "model will look at images in batches as usual train_datagen = ImageDataGenerator(rescale =", "dog; if 0, then cat; we know what index corresponds to which class", "at images in batches as usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range =", "pixels) to squeeze into one pixel in feature map, while strides is about", "are from a Keras example, click link above # the model will look", "learning - CNN for classification # *** # we 
have a big dataset", "fit (and run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) #", "test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')", "test_datagen = ImageDataGenerator(rescale = 1./255) # we scale but do not transform/augment the", "ImageDataGenerator # --------------------------- # deep learning - CNN for classification # *** #", "# neurons are high here because processing images is more complex and we", "click link above # the model will look at images in batches as", "and dogs, locally stored (in folder 5 - Tech) # *** # [1]", "cnn.predict(test_image) # -- decode: if prediction is 1, then dog; if 0, then", "prediction around a single picture # ------ # -- load a specific image", "as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as", "the testset images as we need the originals to compare the effectiveness of", "is a way to perform the stochastic gradient descent model.fit(x = training_set, validation_data", "new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() # -- first", "images as we need the originals to compare the effectiveness of our training/learning", "as images will be processed in batches (batch => extra dimension) test_image =", "# here we take all the pixels and flatten them into a vector", "be processed in batches (batch => extra dimension) test_image = image.img_to_array(test_image) test_image =", "-- flatten model.add(layers.Flatten()) # here we take all the pixels and flatten them", "0 or 1 result = cnn.predict(test_image) # -- decode: if prediction is 1,", "shifts and rotations and flips and zooms to the images # https://keras.io/api/preprocessing/image/ #", "0, then cat; we know what index corresponds to which class by calling", "= 0.2, zoom_range = 0.2, horizontal_flip = True) training_set = 
train_datagen.flow_from_directory('../data/CNN pics dataset/training_set',", "= 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) training_set =", "data # ------ # -- we apply image augmentation/transformations here to avoid over-fitting", "originals to compare the effectiveness of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set',", "flatten model.add(layers.Flatten()) # here we take all the pixels and flatten them into", "np.expand_dims(test_image, axis = 0) # -- make a prediction in terms of either", "# rescale property is about feature scaling while other properties below are from", "specific image to observe after training and predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg',", "we apply image augmentation/transformations here to avoid over-fitting => we apply shifts and", "perform the stochastic gradient descent model.fit(x = training_set, validation_data = test_set, epochs =", "decode: if prediction is 1, then dog; if 0, then cat; we know", "the attribute class_indices as below... 
print(training_set.class_indices) if result[0][0] == 1: prediction = 'dog'", "# -- second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # --", "model = keras.Sequential() # -- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64,", "batches (batch => extra dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis =", "tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep learning - CNN for classification #", "pixel in feature map, while strides is about shifting the frame of pixels", "0) # -- make a prediction in terms of either 0 or 1", "as usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2,", "batch_size = 32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale = 1./255) # we", "of the pool (or set of pixels) to squeeze into one pixel in", "run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is", "# -- flatten model.add(layers.Flatten()) # here we take all the pixels and flatten", "model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) # here we take", "64, 3])) # filters are output filters in convolution, kernel is the CNN", "in batches (batch => extra dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis", "import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from", "zooms to the images # https://keras.io/api/preprocessing/image/ # rescale property is about feature scaling", "import layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import 
ImageDataGenerator # --------------------------- #", "in feature map, while strides is about shifting the frame of pixels to", "squeeze into one pixel in feature map, while strides is about shifting the", "will be processed in batches (batch => extra dimension) test_image = image.img_to_array(test_image) test_image", "either 0 or 1 result = cnn.predict(test_image) # -- decode: if prediction is", "0.2, zoom_range = 0.2, horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size", "np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf", "32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale = 1./255) # we scale but", "= ['accuracy']) # adam is a way to perform the stochastic gradient descent", "first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are", "# filters are output filters in convolution, kernel is the CNN feature detector", "image from tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep learning - CNN for", "class_mode = 'binary') # [2] define the model, according to new keras instructions", "expand the array into an extra dimension as images will be processed in", "array, then expand the array into an extra dimension as images will be", "to squeeze into one pixel in feature map, while strides is about shifting", "model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is a way to perform the", "an extra dimension as images will be processed in batches (batch => extra", "= 0.2, horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64,", "kernel is the CNN feature detector square, input shape for first input layer", "them into a vector that keeps the dimensional charateristics of a picture #", "effectiveness of our 
training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64),", "other properties below are from a Keras example, click link above # the", "predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # -- convert", "1 result = cnn.predict(test_image) # -- decode: if prediction is 1, then dog;", "below... print(training_set.class_indices) if result[0][0] == 1: prediction = 'dog' else: prediction = 'cat'", "we take all the pixels and flatten them into a vector that keeps", "# size of the pool (or set of pixels) to squeeze into one", "= 25) # print(model.summary()) # [4] try a first prediction around a single", "a vector that keeps the dimensional charateristics of a picture # -- connect", "then dog; if 0, then cat; we know what index corresponds to which", "Tech) # *** # [1] load and pre-process data # ------ # --", "more accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one", "shifting the frame of pixels to capture next pixels to observe # --", "from tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep learning - CNN for classification", "convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are output", "# --------------------------- # deep learning - CNN for classification # *** # we", "import image from tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep learning - CNN", "load and pre-process data # ------ # -- we apply image augmentation/transformations here", "model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron for binary classification as output", "cats and dogs, locally stored (in folder 5 - Tech) # *** #", "training and predict test_image = image.load_img('../data/CNN pics 
dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) #", "------ model = keras.Sequential() # -- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu',", "all the pixels and flatten them into a vector that keeps the dimensional", "connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because processing images is more", "test_set, epochs = 25) # print(model.summary()) # [4] try a first prediction around", "as we need the originals to compare the effectiveness of our training/learning test_set", "feature scaling while other properties below are from a Keras example, click link", "or cat/dog) # [3] fit (and run/train) the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy',", "# -- load a specific image to observe after training and predict test_image", "and predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # --", "know what index corresponds to which class by calling the attribute class_indices as", "way to perform the stochastic gradient descent model.fit(x = training_set, validation_data = test_set,", "output filters in convolution, kernel is the CNN feature detector square, input shape", "pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # -- convert the image into a", "1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN", "get more accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just", "accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron", "= (64, 64), batch_size = 32, class_mode = 'binary') test_datagen = ImageDataGenerator(rescale =", "= keras.Sequential() # -- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 
64,", "from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep learning", "True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32,", "= 32, class_mode = 'binary') # [2] define the model, according to new", "ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) training_set", "# ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is a way to", "processed in batches (batch => extra dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image,", "# [1] load and pre-process data # ------ # -- we apply image", "dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) # -- make", "of our training/learning test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size", "print(training_set.class_indices) if result[0][0] == 1: prediction = 'dog' else: prediction = 'cat' print(prediction)", "# https://www.tensorflow.org/guide/keras/rnn # ------ model = keras.Sequential() # -- first convolution and pooling", "kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are output filters in convolution, kernel", "the model # ------ model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is a", "images # https://keras.io/api/preprocessing/image/ # rescale property is about feature scaling while other properties", "images in batches as usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2,", "stochastic gradient descent model.fit(x = training_set, validation_data = test_set, epochs = 25) #", "pool (or set of pixels) to squeeze into one pixel in feature map,", "strides=2)) # -- flatten model.add(layers.Flatten()) 
# here we take all the pixels and", "is the CNN feature detector square, input shape for first input layer model.add(layers.MaxPool2D(pool_size=2,", "in terms of either 0 or 1 result = cnn.predict(test_image) # -- decode:", "we know what index corresponds to which class by calling the attribute class_indices", "3])) # filters are output filters in convolution, kernel is the CNN feature", "dogs, locally stored (in folder 5 - Tech) # *** # [1] load", "apply image augmentation/transformations here to avoid over-fitting => we apply shifts and rotations", "# -- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) #", "a single picture # ------ # -- load a specific image to observe", "we may get more accuracy # -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we", "a picture # -- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because", "try a first prediction around a single picture # ------ # -- load", "-- second convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten", "convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(layers.MaxPool2D(pool_size=2, strides=2)) # -- flatten model.add(layers.Flatten()) #", "print(model.summary()) # [4] try a first prediction around a single picture # ------", "output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron for binary classification", "neurons are high here because processing images is more complex and we may", "neuron for binary classification as output (0/1, or cat/dog) # [3] fit (and", "and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are output filters", "feature map, while strides is about shifting the frame of pixels to capture", 
"filters are output filters in convolution, kernel is the CNN feature detector square,", "model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or set of pixels) to squeeze", "while strides is about shifting the frame of pixels to capture next pixels", "keeps the dimensional charateristics of a picture # -- connect model.add(layers.Dense(units=128, activation='relu')) #", "dimension as images will be processed in batches (batch => extra dimension) test_image", "stored (in folder 5 - Tech) # *** # [1] load and pre-process", "-- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because processing images is", "because processing images is more complex and we may get more accuracy #", "1, then dog; if 0, then cat; we know what index corresponds to", "image augmentation/transformations here to avoid over-fitting => we apply shifts and rotations and", "-- we apply image augmentation/transformations here to avoid over-fitting => we apply shifts", "that keeps the dimensional charateristics of a picture # -- connect model.add(layers.Dense(units=128, activation='relu'))", "flips and zooms to the images # https://keras.io/api/preprocessing/image/ # rescale property is about", "plt import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers", "transform/augment the testset images as we need the originals to compare the effectiveness", "do not transform/augment the testset images as we need the originals to compare", "into a numpy array, then expand the array into an extra dimension as", "by calling the attribute class_indices as below... 
print(training_set.class_indices) if result[0][0] == 1: prediction", "=> extra dimension) test_image = image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis = 0) #", "1./255) # we scale but do not transform/augment the testset images as we", "# -- convert the image into a numpy array, then expand the array", "one pixel in feature map, while strides is about shifting the frame of", "pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from tensorflow", "dataset of pictures (250mb) with cats and dogs, locally stored (in folder 5", "shape for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or", "25) # print(model.summary()) # [4] try a first prediction around a single picture", "the array into an extra dimension as images will be processed in batches", "-- convert the image into a numpy array, then expand the array into", "layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator # --------------------------- # deep", "strides=2)) # size of the pool (or set of pixels) to squeeze into", "to observe after training and predict test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size =", "# the model will look at images in batches as usual train_datagen =", "# -- output layer model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron for", "of pixels) to squeeze into one pixel in feature map, while strides is", "classification as output (0/1, or cat/dog) # [3] fit (and run/train) the model", "CNN for classification # *** # we have a big dataset of pictures", "class_mode = 'binary') test_datagen = ImageDataGenerator(rescale = 1./255) # we scale but do", "one neuron for binary classification as output (0/1, or cat/dog) # [3] fit", "# -- we apply image augmentation/transformations here to avoid over-fitting => we apply", "below are from a Keras example, click link 
above # the model will", "-- load a specific image to observe after training and predict test_image =", "axis = 0) # -- make a prediction in terms of either 0", "cat; we know what index corresponds to which class by calling the attribute", "as pd import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import", "into a vector that keeps the dimensional charateristics of a picture # --", "keras from tensorflow.keras import layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator", "charateristics of a picture # -- connect model.add(layers.Dense(units=128, activation='relu')) # neurons are high", "matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras from tensorflow.keras", "<reponame>warpalatino/public import numpy as np import pandas as pd import matplotlib.pyplot as plt", "32, class_mode = 'binary') # [2] define the model, according to new keras", "# https://keras.io/api/preprocessing/image/ # rescale property is about feature scaling while other properties below", "[2] define the model, according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------", "# deep learning - CNN for classification # *** # we have a", "https://keras.io/api/preprocessing/image/ # rescale property is about feature scaling while other properties below are", "training_set, validation_data = test_set, epochs = 25) # print(model.summary()) # [4] try a", "batch_size = 32, class_mode = 'binary') # [2] define the model, according to", "0.2, horizontal_flip = True) training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64),", "= cnn.predict(test_image) # -- decode: if prediction is 1, then dog; if 0,", "make a prediction in terms of either 0 or 1 result = cnn.predict(test_image)", "pre-process data # ------ # -- we apply image augmentation/transformations here to avoid", "= ImageDataGenerator(rescale = 1./255) # 
we scale but do not transform/augment the testset", "# we have a big dataset of pictures (250mb) with cats and dogs,", "tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image", "apply shifts and rotations and flips and zooms to the images # https://keras.io/api/preprocessing/image/", "here because processing images is more complex and we may get more accuracy", "we need just one neuron for binary classification as output (0/1, or cat/dog)", "model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are output filters in convolution,", "about shifting the frame of pixels to capture next pixels to observe #", "define the model, according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn # ------ model", "convolution, kernel is the CNN feature detector square, input shape for first input", "usual train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip", "into an extra dimension as images will be processed in batches (batch =>", "class_indices as below... 
print(training_set.class_indices) if result[0][0] == 1: prediction = 'dog' else: prediction", "we need the originals to compare the effectiveness of our training/learning test_set =", "for first input layer model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or set", "the stochastic gradient descent model.fit(x = training_set, validation_data = test_set, epochs = 25)", "target_size = (64, 64), batch_size = 32, class_mode = 'binary') # [2] define", "--------------------------- # deep learning - CNN for classification # *** # we have", "a specific image to observe after training and predict test_image = image.load_img('../data/CNN pics", "= test_set, epochs = 25) # print(model.summary()) # [4] try a first prediction", "a prediction in terms of either 0 or 1 result = cnn.predict(test_image) #", "numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow", "and zooms to the images # https://keras.io/api/preprocessing/image/ # rescale property is about feature", "corresponds to which class by calling the attribute class_indices as below... print(training_set.class_indices) if", "binary classification as output (0/1, or cat/dog) # [3] fit (and run/train) the", "-- first convolution and pooling model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters", "which class by calling the attribute class_indices as below... 
print(training_set.class_indices) if result[0][0] ==", "import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras from", "map, while strides is about shifting the frame of pixels to capture next", "pixels and flatten them into a vector that keeps the dimensional charateristics of", "tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.preprocessing", "classification # *** # we have a big dataset of pictures (250mb) with", "[4] try a first prediction around a single picture # ------ # --", "= 'binary') test_datagen = ImageDataGenerator(rescale = 1./255) # we scale but do not", "epochs = 25) # print(model.summary()) # [4] try a first prediction around a", "# [2] define the model, according to new keras instructions # https://www.tensorflow.org/guide/keras/rnn #", "single picture # ------ # -- load a specific image to observe after", "rotations and flips and zooms to the images # https://keras.io/api/preprocessing/image/ # rescale property", "import ImageDataGenerator # --------------------------- # deep learning - CNN for classification # ***", "[1] load and pre-process data # ------ # -- we apply image augmentation/transformations", "(64, 64)) # -- convert the image into a numpy array, then expand", "terms of either 0 or 1 result = cnn.predict(test_image) # -- decode: if" ]
[ "START\") while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1, 6) if", "player_position = 1 computer_position = 1 def board(): print(middle_dot*(player_position - 1) + \"P\"", "print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6) if", "+= random.randint(1, 6) if player_position > 30: player_position = 30 board() if player_position", "+= random.randint(1, 6) if computer_position > 30: computer_position = 30 board() if computer_position", "player_position == 30: print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position +=", "<reponame>rrbb014/rrbb-playground<gh_stars>0 import random middle_dot = chr(0xb7) # U+00B7 -> 16 * 11 +", "input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1, 6) if player_position > 30:", "computer_position > 30: computer_position = 30 board() if computer_position == 30: print(\"컴퓨터의 승리\")", "말이 움직입니다.\") computer_position += random.randint(1, 6) if computer_position > 30: computer_position = 30", ") if __name__ == \"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의", "+ \"GOAL\" ) print(middle_dot * (computer_position - 1) + \"C\" + middle_dot *", "(computer_position - 1) + \"C\" + middle_dot * (30 - computer_position) + \"GOAL\"", "# U+00B7 -> 16 * 11 + 7 -> 183 player_position = 1", "* (30 - computer_position) + \"GOAL\" ) if __name__ == \"__main__\": board() print(\"주사위게임", "middle_dot * (30 - computer_position) + \"GOAL\" ) if __name__ == \"__main__\": board()", "player_position += random.randint(1, 6) if player_position > 30: player_position = 30 board() if", "6) if player_position > 30: player_position = 30 board() if player_position == 30:", "= 1 computer_position = 1 def board(): print(middle_dot*(player_position - 1) + \"P\" +", "움직입니다.\") player_position += random.randint(1, 6) if player_position > 30: player_position = 30 board()", "30 board() if player_position == 30: 
print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이", "(30 - computer_position) + \"GOAL\" ) if __name__ == \"__main__\": board() print(\"주사위게임 START\")", "1) + \"C\" + middle_dot * (30 - computer_position) + \"GOAL\" ) if", "if computer_position > 30: computer_position = 30 board() if computer_position == 30: print(\"컴퓨터의", "1) + \"P\" + middle_dot * (30 - player_position) + \"GOAL\" ) print(middle_dot", "board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1,", "= 30 board() if player_position == 30: print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의", "random.randint(1, 6) if computer_position > 30: computer_position = 30 board() if computer_position ==", "computer_position) + \"GOAL\" ) if __name__ == \"__main__\": board() print(\"주사위게임 START\") while True:", ") print(middle_dot * (computer_position - 1) + \"C\" + middle_dot * (30 -", "\"GOAL\" ) if __name__ == \"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면", "board() if player_position == 30: print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\")", "> 30: player_position = 30 board() if player_position == 30: print(\"여러분의 승리\") break", "middle_dot * (30 - player_position) + \"GOAL\" ) print(middle_dot * (computer_position - 1)", "while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1, 6) if player_position", "+ 7 -> 183 player_position = 1 computer_position = 1 def board(): print(middle_dot*(player_position", "+ \"GOAL\" ) if __name__ == \"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를", "7 -> 183 player_position = 1 computer_position = 1 def board(): print(middle_dot*(player_position -", "말이 움직입니다.\") player_position += random.randint(1, 6) if player_position > 30: player_position = 30", "- 1) + \"P\" + middle_dot * (30 - player_position) + \"GOAL\" )", "- computer_position) + \"GOAL\" ) if __name__ == \"__main__\": board() print(\"주사위게임 START\") while", "-> 16 * 
11 + 7 -> 183 player_position = 1 computer_position =", "break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6) if computer_position >", "183 player_position = 1 computer_position = 1 def board(): print(middle_dot*(player_position - 1) +", "- 1) + \"C\" + middle_dot * (30 - computer_position) + \"GOAL\" )", "6) if computer_position > 30: computer_position = 30 board() if computer_position == 30:", "30: print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6)", "if __name__ == \"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의 말이", "+ middle_dot * (30 - computer_position) + \"GOAL\" ) if __name__ == \"__main__\":", "+ \"P\" + middle_dot * (30 - player_position) + \"GOAL\" ) print(middle_dot *", "1 def board(): print(middle_dot*(player_position - 1) + \"P\" + middle_dot * (30 -", "random middle_dot = chr(0xb7) # U+00B7 -> 16 * 11 + 7 ->", "누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6) if computer_position > 30: computer_position", "chr(0xb7) # U+00B7 -> 16 * 11 + 7 -> 183 player_position =", "* 11 + 7 -> 183 player_position = 1 computer_position = 1 def", "player_position > 30: player_position = 30 board() if player_position == 30: print(\"여러분의 승리\")", "\"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position +=", "-> 183 player_position = 1 computer_position = 1 def board(): print(middle_dot*(player_position - 1)", "if player_position > 30: player_position = 30 board() if player_position == 30: print(\"여러분의", "누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1, 6) if player_position > 30: player_position", "= chr(0xb7) # U+00B7 -> 16 * 11 + 7 -> 183 player_position", "print(middle_dot * (computer_position - 1) + \"C\" + middle_dot * (30 - computer_position)", "print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1, 6)", "승리\") break 
input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6) if computer_position", "computer_position = 1 def board(): print(middle_dot*(player_position - 1) + \"P\" + middle_dot *", "+ \"C\" + middle_dot * (30 - computer_position) + \"GOAL\" ) if __name__", "+ middle_dot * (30 - player_position) + \"GOAL\" ) print(middle_dot * (computer_position -", "input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6) if computer_position > 30:", "여러분의 말이 움직입니다.\") player_position += random.randint(1, 6) if player_position > 30: player_position =", "board(): print(middle_dot*(player_position - 1) + \"P\" + middle_dot * (30 - player_position) +", "\"GOAL\" ) print(middle_dot * (computer_position - 1) + \"C\" + middle_dot * (30", "컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1, 6) if computer_position > 30: computer_position =", "import random middle_dot = chr(0xb7) # U+00B7 -> 16 * 11 + 7", "움직입니다.\") computer_position += random.randint(1, 6) if computer_position > 30: computer_position = 30 board()", "= 1 def board(): print(middle_dot*(player_position - 1) + \"P\" + middle_dot * (30", "\"P\" + middle_dot * (30 - player_position) + \"GOAL\" ) print(middle_dot * (computer_position", "16 * 11 + 7 -> 183 player_position = 1 computer_position = 1", "> 30: computer_position = 30 board() if computer_position == 30: print(\"컴퓨터의 승리\") break", "== \"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position", "True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\") player_position += random.randint(1, 6) if player_position >", "1 computer_position = 1 def board(): print(middle_dot*(player_position - 1) + \"P\" + middle_dot", "== 30: print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position += random.randint(1,", "30: player_position = 30 board() if player_position == 30: print(\"여러분의 승리\") break input(\"Enter를", "random.randint(1, 6) if player_position > 30: player_position = 30 
board() if player_position ==", "if player_position == 30: print(\"여러분의 승리\") break input(\"Enter를 누르면 컴퓨터의 말이 움직입니다.\") computer_position", "computer_position += random.randint(1, 6) if computer_position > 30: computer_position = 30 board() if", "player_position) + \"GOAL\" ) print(middle_dot * (computer_position - 1) + \"C\" + middle_dot", "def board(): print(middle_dot*(player_position - 1) + \"P\" + middle_dot * (30 - player_position)", "__name__ == \"__main__\": board() print(\"주사위게임 START\") while True: input(\"Enter를 누르면 여러분의 말이 움직입니다.\")", "print(middle_dot*(player_position - 1) + \"P\" + middle_dot * (30 - player_position) + \"GOAL\"", "* (computer_position - 1) + \"C\" + middle_dot * (30 - computer_position) +", "11 + 7 -> 183 player_position = 1 computer_position = 1 def board():", "\"C\" + middle_dot * (30 - computer_position) + \"GOAL\" ) if __name__ ==", "middle_dot = chr(0xb7) # U+00B7 -> 16 * 11 + 7 -> 183", "player_position = 30 board() if player_position == 30: print(\"여러분의 승리\") break input(\"Enter를 누르면", "- player_position) + \"GOAL\" ) print(middle_dot * (computer_position - 1) + \"C\" +", "* (30 - player_position) + \"GOAL\" ) print(middle_dot * (computer_position - 1) +", "U+00B7 -> 16 * 11 + 7 -> 183 player_position = 1 computer_position", "(30 - player_position) + \"GOAL\" ) print(middle_dot * (computer_position - 1) + \"C\"" ]
[ "self.cur_event = \"\" self.event_args = () self.event_kwargs = {} async def send(self, data):", "event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args = () self.event_kwargs =", "self.cur_event = None self.event_args = () self.event_kwargs = {} async def change_presence(self, *,", "self.cur_event is None: raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event", "*, activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args = (activity, status, afk,", "= {} async def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is None: raise", "data) if self.cur_event is None: raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args,", "is None: raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event =", "self.event_args = () self.event_kwargs = {} async def change_presence(self, *, activity=None, status=None, afk=False,", "callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args", "data): self._dispatch('socket_raw_send', data) if self.cur_event is None: raise ValueError(\"Unhandled Websocket send event\") await", "self.cur_event = \"presence\" self.event_args = (activity, status, afk, since) await super().change_presence(activity=activity, status=status, afk=afk,", "FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args = ()", "{} async def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is None: raise ValueError(\"Unhandled", ". 
import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event =", "**kwargs) self.cur_event = \"\" self.event_args = () self.event_kwargs = {} async def send(self,", "= () self.event_kwargs = {} async def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event", "import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\"", "status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args = (activity, status, afk, since) await", "from . import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event", "= \"\" self.event_args = () self.event_kwargs = {} async def send(self, data): self._dispatch('socket_raw_send',", "class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args =", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args = () self.event_kwargs", "() self.event_kwargs = {} async def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is", "import discord.gateway as gateway from . import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args,", "*self.event_args, **self.event_kwargs) self.cur_event = None self.event_args = () self.event_kwargs = {} async def", "if self.cur_event is None: raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs)", "gateway from . 
import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args = () self.event_kwargs =", "self.event_kwargs = {} async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event =", "since=0.0): self.cur_event = \"presence\" self.event_args = (activity, status, afk, since) await super().change_presence(activity=activity, status=status,", "super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args = () self.event_kwargs = {} async def", "{} async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args", "self.event_args = () self.event_kwargs = {} async def send(self, data): self._dispatch('socket_raw_send', data) if", "**kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args = () self.event_kwargs = {} async", "afk=False, since=0.0): self.cur_event = \"presence\" self.event_args = (activity, status, afk, since) await super().change_presence(activity=activity,", "None self.event_args = () self.event_kwargs = {} async def change_presence(self, *, activity=None, status=None,", "() self.event_kwargs = {} async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event", "change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args = (activity, status,", "as gateway from . 
import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs): super().__init__(*args,", "**self.event_kwargs) self.cur_event = None self.event_args = () self.event_kwargs = {} async def change_presence(self,", "= {} async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\"", "Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args = ()", "raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args", "send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args = () self.event_kwargs", "= () self.event_kwargs = {} async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0):", "self._dispatch('socket_raw_send', data) if self.cur_event is None: raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event,", "*args, **kwargs): super().__init__(*args, **kwargs) self.cur_event = \"\" self.event_args = () self.event_kwargs = {}", "discord.gateway as gateway from . 
import callbacks class FakeWebSocket(gateway.DiscordWebSocket): def __init__(self, *args, **kwargs):", "await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args = () self.event_kwargs = {}", "async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args =", "ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args =", "callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None self.event_args = () self.event_kwargs = {} async", "activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args = (activity, status, afk, since)", "None: raise ValueError(\"Unhandled Websocket send event\") await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs) self.cur_event = None", "= None self.event_args = () self.event_kwargs = {} async def change_presence(self, *, activity=None,", "def change_presence(self, *, activity=None, status=None, afk=False, since=0.0): self.cur_event = \"presence\" self.event_args = (activity,", "send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is None: raise ValueError(\"Unhandled Websocket send event\")", "def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is None: raise ValueError(\"Unhandled Websocket send", "self.event_kwargs = {} async def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is None:", "async def send(self, data): self._dispatch('socket_raw_send', data) if self.cur_event is None: raise ValueError(\"Unhandled Websocket", "= \"presence\" self.event_args = (activity, status, afk, since) await super().change_presence(activity=activity, status=status, afk=afk, since=since)", "\"\" self.event_args = () 
self.event_kwargs = {} async def send(self, data): self._dispatch('socket_raw_send', data)" ]
[ "__init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column = column def", "= column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info", "skip: flag indicating whether no transformation shall be performed on all of the", "of creating a separate column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns", "pd.DataFrame): matchedRulesByColumn = {} self._rules = [] for rule in self._userRules: matchingColumns =", "of values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse:", "= name @abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame)", "not been set. \"\"\" return self._name def setName(self, name): self._name = name @abstractmethod", "self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace", "values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values else: values = applicableDF.values.flatten() values =", "= drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep", "columns that are to be replaced by a list one-hot encoded columns each", "regex matching names of columns that are to be replaced by a list", "pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer):", "the possible values will be inferred from the columns :param inplace: whether to", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data", "replace the input columns by columns of the same name containing arrays as", "info = super().info() 
info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer):", "None: self._columnsToEncode = [] self._columnNameRegex = \"$\" elif type(columns) == str: self._columnNameRegex =", "a data frame by applying a condition function to each row and retaining", "modified :param columnTransform: a function operating on single cells or a Numpy ufunc", "= DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which is not", "arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a regular expression defining the column(s) the", "feature generator from which the rule originated was never applied in order to", "instance's default factory will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. :param", "the same number of values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self,", "case where useArrayValues=True); If None, then no columns are actually to be one-hot-encoded", "col, categories in zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\" not in state:", "self._columnNameRegex = columns self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns", "all columns :param inplace: whether to apply the transformation in-place :param arrayValued: whether", "to be learned for each of the columns for the case where the", "self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str):", "placeholder rule and the regex must be set later via setRegex or the", "(or an array-valued column for the case where useArrayValues=True); If None, then no", "= drop self.keep = keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df =", "log = 
logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data frame transformers,", "= transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns def toRule(self, regex: Optional[str]): \"\"\"", "self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return: the list of names of all", "self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series", "`transformer` nor `transformerInstance` are given, the containing instance's default factory will be used.", "whether the column values are not scalars but arrays (of arbitrary lengths). It", "single column, matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1))", "..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class", "shall be performed on all of the columns :param unsupported: flag indicating whether", "it returns True \"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column", "Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance (from sklearn.preprocessing)", "condition on the selected column and retaining only the rows for which it", "not None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise Exception(f\"No transformer", "__init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column = column def", "info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class 
DFTSortColumns(RuleBasedDataFrameTransformer):", "= rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for c in matchingColumns: matchedRulesByColumn[c] =", "self._userRules return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules = []", "= values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} matched no columns\") #", "bool: return True def getName(self) -> str: \"\"\" :return: the name of this", "DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column and retains only", "several models that use the same feature with associated rule/rule template (disabling `fit`", "factory for the creation of transformer instances (from sklearn.preprocessing, e.g. StandardScaler) that shall", "arrays containing the possible values of each of the specified columns (for case", "self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()} else: if", "= None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to", "for dft in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return: the list of", "transformerFactory: a factory for the generation of the transformer instance, which will only", "Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\"", "Set import numpy as np import pandas as pd from sklearn.preprocessing import OneHotEncoder", "column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition def", "backwards compatibility with persisted DFTs based on code prior to commit 7088cbbe #", "df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) -> 
\"InverseDataFrameTransformer\": \"\"\" :return: a transformer", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer):", "__init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation def", "columns, transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders = {column:", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers: df = transformer.apply(df) return", "that are to be replaced by a list one-hot encoded columns each (or", "rule originated was never applied in order to have the rule instantiated.\") return", "self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str,", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values})", "self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self):", "to apply transformation not to scalar-valued columns but to one or more array-valued", "= set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The following columns are", "matchedRulesByColumn) return df def info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] =", "def info(self): info = super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return info", "or the containing instance's default factory will be used. 
NOTE: Use an instance", "as values instead of creating a separate column per original value \"\"\" super().__init__()", "0: log.warning(f\"{self} does not apply to any columns, transformer has no effect; regex='{self._columnNameRegex}'\")", "same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace =", "def apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise", "info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by", "return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules = [] for", "length) are to be transformed in the same way. If multiple columns are", "columns :param unsupported: flag indicating whether normalisation of all columns is unsupported (shall", "and does not need to be fitted to data\"\"\" def _fit(self, df: pd.DataFrame):", "df[cols].values else: if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1))", "self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories:", "self.column info[\"setToKeep\"] = self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame", "str else keep self.drop = drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df", "unsupported=False). 
If None is given, either transformerFactory or the containing instance's default factory", "= self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str,", "selected column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts", "super().__init__() self.column = column self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "the selected column and retaining only the rows for which it returns True", "the column as a whole \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform", "def __init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation", "df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data frame by", "dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()} else:", "the template to a rule for all columns matching the regex :param regex:", "in zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] =", "0: if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported according to {rule}.", "setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column = column def _apply(self, df: pd.DataFrame)", "Any]: d = super()._toStringAdditionalEntries() if self._rules is not None: d[\"rules\"] = self._rules else:", "info(self): info = super().info() info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" 
Applies", "arrays belonging to a single row must all have the same length. \"\"\"", "function that takes a Numpy array and from which the returned value will", "def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the", "the set of rules; rules are always fitted and applied in the given", "df[series.name] = series return df def info(self): info = super().info() info[\"inplace\"] = self.inplace", "!= len(categories): raise ValueError(f\"Given categories must have the same length as columns to", "= df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"]", "class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a boolean function to", "to have the rule instantiated.\") return self.regex.fullmatch(column) is not None def matchingColumns(self, columns:", "value will be assigned to the column as a whole \"\"\" super().__init__() self.column", "in flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do not contain the", "if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the", "was fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True)", "not in the setToDrop \"\"\" def __init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop", "to apply the transformation in-place :param arrayValued: whether to apply transformation not to", "rows for which it returns True \"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__()", "Exception as e: raise Exception(f\"Could not compile regex '{r}': {e}\") 
self._rules.append(specialisedRule) def _checkUnhandledColumns(self,", "compatibility with persisted DFTs based on code prior to commit 7088cbbe # They", "self._columnNameRegex = \"$\" elif type(columns) == str: self._columnNameRegex = columns self._columnsToEncode = None", "data. \"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] =", "for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0:", "the rule will not be applicable. :param skip: flag indicating whether no transformation", "skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\"", "= invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\"", "= self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if", "from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util import flattenArguments from", "self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"]", "super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class", "df.drop(columns=self.drop) return df def info(self): info = super().info() info[\"keep\"] = self.keep info[\"drop\"] =", "Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, 
transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None)", "r = orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception as e: raise Exception(f\"Could", "len(unhandledColumns) > 0: raise Exception(f\"The following columns are not handled by any rules:", "Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] = None): super().__init__() self.keep = [keep]", "\"\"\" Adds a new column with counts of the values on a selected", "be fitted :param independentColumns: whether a separate transformation is to be learned for", "np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values else: values =", "Sequence, Union, Dict, Callable, Any, Optional, Set import numpy as np import pandas", "handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df:", "= defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries()", "return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with counts of the", "column: str): if self.regex is None: raise Exception(\"Attempted to apply a placeholder rule.", "shall apply; if None, apply it to all columns :param inplace: whether to", "matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise", "pass def fit(self, df: pd.DataFrame): pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def", "rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) >", "= copy.copy(rule) r = 
orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception as e:", "self.inplace: df = df.copy() cols = self.columns if cols is None: cols =", "a regular expression defining the column the rule applies to :return: the resulting", "raise Exception(f\"Array-valued case is only supported for a single column, matched {matchingColumns} for", "columns by retaining or dropping specified columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]]", "if self._columnsToEncode is None: self._columnsToEncode = [c for c in df.columns if re.fullmatch(self._columnNameRegex,", "info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return info def findFirstTransformerByType(self,", "not None: df = df.drop(columns=self.drop) return df def info(self): info = super().info() info[\"keep\"]", "applied to columns matched by such rules, unmatched columns will not be transformed.", "applying a boolean function to one of the columns and retaining only the", "apply each transformer in the chain receives the transformed output of its predecessor.", "which the value is in the setToKeep \"\"\" def __init__(self, column: str, setToKeep:", "if ignoreUnknown else \"error\" if categories is not None: if type(categories) == dict:", "self.independentColumns = independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert the template to a", "if True and an unknown category is encountered during transform, the resulting one-hot", "transf in self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] =", ":param arrayValued: whether the column values are not scalars but arrays (of arbitrary", "return self._name def setName(self, name): self._name = name @abstractmethod def _fit(self, df: pd.DataFrame):", "columns are matched by a rule :param inplace: whether to apply data frame", "= re.compile(regex) if regex is not None else 
None self.skip = skip self.unsupported", "not None: df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to", "transformation shall be performed on the matching column(s) :param unsupported: flag indicating whether", "function to each row and retaining only the rows for which it returns", "'column' using 'columnTransform'. This transformer can be used to utilise Numpy vectorisation for", "to which it applies (learning a single transformer based on the values of", "-> pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose (forward)", "setName(self, name): self._name = name @abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod def", "= columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders = None if columns is", "DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data frame transformers, i.e. objects which can", "be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param arrayValued: whether the column", "fit: {rule} defines no transformer and instance has no transformer factory\") rule.transformer =", "aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self,", "columns in ascending order \"\"\" def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[sorted(df.columns)]", "for c in df.columns if re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode) ==", "to a rule for all columns matching the regex :param regex: a regular", "else: if len(cols) == 1: c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for", "\"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[],", "column: str, condition: Callable[[Any], bool]): super().__init__() self.column = column self.condition = condition def", "one column. :param fit: whether the rule's transformer shall be fitted :param independentColumns:", "= super()._toStringAdditionalEntries() if self._rules is not None: d[\"rules\"] = self._rules else: d[\"userRules\"] =", "rows for which the value is in the setToKeep \"\"\" def __init__(self, column:", "__init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace def _apply(self,", "is supported, i.e. the regex must match at most one column. 
:param fit:", "to be one-hot-encoded :param categories: numpy arrays containing the possible values of each", "None is given, either transformerFactory or the containing instance's default factory will be", "self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode}", "originated was never applied in order to have the rule instantiated.\") return self.regex.fullmatch(column)", "can transform one data frame into another (possibly applying the transformation to the", "= self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying", "transform, the resulting one-hot encoded columns for this feature will be all zeros.", "is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for", "the original data frame - in-place transformation). 
A data frame transformer may require", "only if you want, in particular, the instance to be shared across several", "not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns:", "col, categories in categories.items()} else: if len(columns) != len(categories): raise ValueError(f\"Given categories must", "pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"]", "self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit = fit self.independentColumns = independentColumns def", "for rule in self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators:", "rule instantiated.\") return self.regex.fullmatch(column) is not None def matchingColumns(self, columns: Sequence[str]): return [col", "which the function returns True \"\"\" def __init__(self, column: str, condition: Callable[[Any], bool]):", "DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns in ascending order \"\"\" def _apply(self,", "df = df.loc[self.keep] if self.drop is not None: df = df.drop(self.drop) return df", "given, either transformerFactory or the containing instance's default factory will be used. 
NOTE:", "DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a boolean function to one", "matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More", "info = super().info() info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a", "This transformer can be used to utilise Numpy vectorisation for performance optimisation. \"\"\"", "ignoreUnknown: if True and an unknown category is encountered during transform, the resulting", "rules that don't specify a particular transformer. The default transformer will only be", "A DataFrame transformer that filters columns by retaining or dropping specified columns \"\"\"", "not self.arrayValued: values = df[cols].values else: if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten())", "rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued case is only supported for a", "__init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition = condition def _apply(self, df: pd.DataFrame) ->", "self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns def toRule(self, regex: Optional[str]):", "class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory:", "column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts =", "the values of all applicable columns) \"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False,", "is not None: df = df[self.keep] if self.drop is not None: df =", "= columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown", "def 
_fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules = [] for rule in", "where each rule defines a set of columns to which it applies (learning", "unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False):", "applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} matched no", "by such rules, unmatched columns will not be transformed. :param requireAllHandled: whether to", "else \"error\" if categories is not None: if type(categories) == dict: self.oneHotEncoders =", "in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName) for i", "series return df def info(self): info = super().info() info[\"inplace\"] = self.inplace return info", "[transf.getName() for transf in self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames()", "a data frame's columns in ascending order \"\"\" def _apply(self, df: pd.DataFrame) ->", "Optional[str]): \"\"\" Convert the template to a rule for all columns matching the", "DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose logic", "Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame) ->", "no transformer and instance has no transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit:", "= cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]] else: transformedValues =", "df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if not self.inplace: df = df.copy() cols", "column is supported, i.e. 
the regex must match at most one column. :param", "transformation shall apply; if None, apply it to all columns :param inplace: whether", "same normalisation process for each column) unless independentColumns=True. If None, the rule is", "pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns:", "if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do not contain the same number", "to be fitted to data\"\"\" def _fit(self, df: pd.DataFrame): pass def fit(self, df:", "re.compile(regex) def matches(self, column: str): if self.regex is None: raise Exception(\"Attempted to apply", "If multiple columns are transformed, then the arrays belonging to a single row", "return d def _fit(self, df: pd.DataFrame): if self._columnsToEncode is None: self._columnsToEncode = [c", "sklearn.preprocessing, e.g. StandardScaler) that shall be used to create a transformer for all", "df = df.drop(columns=self.drop) return df def info(self): info = super().info() info[\"keep\"] = self.keep", "condition: Callable[[Any], bool]): super().__init__() self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply", "which will only be applied if `transformer` is not given; if neither `transformer`", "= d.get(\"_paramInfo\", {}) self.__dict__ = d def _toStringExcludePrivate(self) -> bool: return True def", "re from abc import ABC, abstractmethod from typing import List, Sequence, Union, Dict,", "be used to utilise Numpy vectorisation for performance optimisation. 
\"\"\" def __init__(self, column:", "= super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info", "{} # arguments passed to init that are not saved otherwise can be", "df def _apply(self, df): return self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df, True)", "row in transformedValues] return df def _apply(self, df): return self._apply_transformer(df, False) def applyInverse(self,", "self._fit(df) self._isFitted = True def isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame) ->", "(from sklearn.preprocessing) to use (which will be fitted & applied) :param columns: the", "SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip: flag", "of the columns and retaining only the rows for which the function returns", "self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which is not fitted: \" f\"the df", "= df[self.keep] if self.drop is not None: df = df.drop(columns=self.drop) return df def", "'columns') or dictionary mapping column name to array of possible categories for the", "does not need to be fitted to data\"\"\" def _fit(self, df: pd.DataFrame): pass", "unsupported: flag indicating whether normalisation of the matching column(s) is unsupported (shall trigger", "= {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()} else: if len(columns)", "DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None, drop: Set = None): super().__init__() self.drop", "and instance has no transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit", "are to be replaced by a list one-hot encoded columns each (or an", "not be applicable. 
:param skip: flag indicating whether no transformation shall be performed", "[] for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns: if", "1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols] lengths = [len(a) for", "independentColumns=True. If None, the rule is a placeholder rule and the regex must", "= rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) -> Dict[str, Any]:", "super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self,", "whether to apply data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace", "(from sklearn.preprocessing, e.g. StandardScaler) to apply to the matching column(s) for the case", "by retaining or dropping specified columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]] =", "not to scalar-valued columns but to one or more array-valued columns, where the", "not None else None, \"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted", "arrays as values instead of creating a separate column per original value \"\"\"", "encoded columns each (or an array-valued column for the case where useArrayValues=True); If", "multiple columns are transformed, then the arrays belonging to a single row must", "of names or regex matching names of columns that are to be replaced", "e.g. StandardScaler) that shall be used to create a transformer for all rules", "They lack the __isFitted attribute and we assume that each such DFT was", "if type(keep) == str else keep self.drop = drop def _apply(self, df: pd.DataFrame)", "used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param independentColumns: whether a separate transformation", "= [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self):", "super().info() info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame", "is not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply to any", "isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT", "and apply each transformer in the chain receives the transformed output of its", "init that are not saved otherwise can be persisted here # for backwards", "if rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) >", "for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns: values =", "the case where a transformation is necessary (skip=False, unsupported=False). If None is given,", "appropriate). Otherwise, use a factory. :param transformerFactory: a factory for the generation of", "self.columns = columns self.inplace = inplace self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"]", "row must all have the same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer", "df = df.copy() cols = self.columns if cols is None: cols = df.columns", "to one or more array-valued columns, where the values of all arrays within", "of its predecessor. 
\"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers)", "super().__init__() self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)]", "return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol =", "in matchedRulesByColumn: raise Exception(f\"More than one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\")", "a particular transformer. The default transformer will only be applied to columns matched", "flag indicating whether normalisation of the matching column(s) is unsupported (shall trigger an", "entries in such arrays are to be normalised in the same way. If", "the column to be modified :param columnTransform: a function that takes a Numpy", "is only supported for a single column, matched {matchingColumns} for {rule}\") values =", "d[\"userRules\"] = self._userRules return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules", "\"error\" if categories is not None: if type(categories) == dict: self.oneHotEncoders = {col:", "df): return self._apply_transformer(df, True) def info(self): info = super().info() info[\"columns\"] = self.columns info[\"inplace\"]", "no transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF =", "return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation =", "import pandas as pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from", "matches(self, column: str): if self.regex is None: raise Exception(\"Attempted to apply a placeholder", 
"DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals = decimals def _apply(self, df: pd.DataFrame) ->", "df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return", "applicableDF.values else: values = applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG -", "setToDrop \"\"\" def __init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column", "x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols) == 1:", "if c in matchedRulesByColumn: raise Exception(f\"More than one rule applies to column '{c}':", "{self.__class__.__name__}.\") if not rule.skip: if rule.transformer is None: if rule.transformerFactory is not None:", "applicable. :param skip: flag indicating whether no transformation shall be performed on the", "with counts of the values on a selected column \"\"\" def __init__(self, columnForEntryCount:", "Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft, cls): return dft return None class", "selected column and retaining only the rows for which it returns True \"\"\"", "column specified by 'column' using 'columnTransform' \"\"\" def __init__(self, column: str, columnTransform: Union[Callable,", "a new column with counts of the values on a selected column \"\"\"", "= self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame", "if not self.arrayValued: values = df[cols].values else: if len(cols) == 1: values =", "d def _fit(self, df: pd.DataFrame): if self._columnsToEncode is None: self._columnsToEncode = [c for", "is unsupported (shall trigger an exception if 
attempted) :param transformer: a transformer instance", "If None, then no columns are actually to be one-hot-encoded :param categories: numpy", "None if columns is None: self._columnsToEncode = [] self._columnNameRegex = \"$\" elif type(columns)", "rule.independentColumns: values = applicableDF.values else: values = applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values)", "df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for row in df.values] for iCol, col", "inverse: bool) -> pd.DataFrame: if not self.inplace: df = df.copy() cols = self.columns", "if not self.inplace: df = df.copy() cols = self.columns if cols is None:", "same way. If arrayValued is True, only a single matching column is supported,", "it applies to multiple columns, these columns will be normalised in the same", "\"\"\" def __init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column =", "_toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def", "0: return df if not self.inplace: df = df.copy() for columnName in self._columnsToEncode:", "be modified :param columnTransform: a function that takes a Numpy array and from", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a", "lengths = [len(a) for a in flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns", "for all columns matching the regex :param regex: a regular expression defining the", "None self.oneHotEncoders = None if columns is None: self._columnsToEncode = [] self._columnNameRegex =", "pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply a", "= columnForAggregation self.aggregation = 
aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation)", "of column names to which the transformation shall apply; if None, apply it", "which it returns True \"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition =", "Set): super().__init__() self.setToDrop = setToDrop self.column = column def _apply(self, df: pd.DataFrame) ->", "each column) unless independentColumns=True. If None, the rule is a placeholder rule and", "using training data. \"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker:", "def _apply(self, df): return self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df, True) def", "category will raise an error. :param arrayValuedResult: whether to replace the input columns", "= arrayValued self.fit = fit self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self,", "placeholder rule. Perhaps the feature generator from which the rule originated was never", "= transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit = fit self.independentColumns =", "df def info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return", "inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy()", "inplace: whether to apply the transformation in-place :param arrayValued: whether to apply transformation", "columns and retaining only the rows for which the function returns True \"\"\"", "chain of data frame transformers. 
During fit and apply each transformer in the", "state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return", "def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]:", "-> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame)", "case where the rule matches multiple columns. \"\"\" if skip and transformer is", ".sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util import flattenArguments from ..util.pandas", "if self._defaultTransformerFactory is None: raise Exception(f\"No transformer to fit: {rule} defines no transformer", "df: pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers: df = transformer.apply(df) return df", "instance to be shared across several models that use the same feature with", "Modifies a column specified by 'column' using 'columnTransform'. 
This transformer can be used", "one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns)", "column: str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column = column def _apply(self,", "retaining only the rows for which the function returns True \"\"\" def __init__(self,", "all columns are matched by a rule :param inplace: whether to apply data", "one of the columns and retaining only the rows for which the function", "categories: numpy arrays containing the possible values of each of the specified columns", "self.inplace = inplace self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False)", "DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a condition function to each", "is given, either transformerFactory or the containing instance's default factory will be used.", "df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a boolean function", "pd.DataFrame: df = df.copy() if self.keep is not None: df = df.loc[self.keep] if", "transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\"", "-> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer", "for which the function returns True \"\"\" def __init__(self, column: str, condition: Callable[[Any],", "fitted :param independentColumns: whether a separate transformation is to be learned for each", "\"\"\" :return: the list of names of all contained feature generators \"\"\" return", "applicable columns) \"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None,", 
"def toRule(self, regex: Optional[str]): \"\"\" Convert the template to a rule for all", "df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:,", "fit: whether the rule's transformer shall be fitted :param independentColumns: whether a separate", "for col in cols] lengths = [len(a) for a in flatColArrays] if len(set(lengths))", "columns) \"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory:", "None: cols = df.columns if not self.arrayValued: values = df[cols].values else: if len(cols)", "for a single column, matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values =", "info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column and", "def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition = condition def _apply(self, df: pd.DataFrame)", "and we assume that each such DFT was fitted def __setstate__(self, d): d[\"_name\"]", "rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]]", "a Numpy array and from which the returned value will be assigned to", "return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data frame by applying", "setToDrop self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def", "variables :param columns: list of names or regex matching names of columns that", "class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None, drop: Set = None): super().__init__()", "applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) > 0:", "type(categories) == 
dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in", "else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols)", "an array-valued column for the case where useArrayValues=True); If None, then no columns", "values of all applicable columns) \"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer:", "skip and transformer is not None: raise ValueError(\"skip==True while transformer is not None\")", "transformed output of its predecessor. \"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers", "self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)} def", "d def _fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules = [] for rule", "independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False,", "= [] for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns:", "transformer for all rules that don't specify a particular transformer. 
The default transformer", "columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class", "None\") self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory", "unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit = fit", "of the values on a selected column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts:", "None: cols = df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda", ":param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use (which will be fitted", "= sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if not rule.arrayValued:", "super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace = inplace self.arrayValued =", "self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a chain of data frame", "categories is not None: if type(categories) == dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False,", "`fit` where appropriate). Otherwise, use a factory. 
:param transformerFactory: a factory for the", "def info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return info", "= self.columns if cols is None: cols = df.columns if not self.arrayValued: values", "super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info", "columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation def _apply(self,", "df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return", "axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if not self.inplace:", "the instance to be shared across several models that use the same feature", "{rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation of", "will be normalised in the same way (using the same normalisation process for", "ABC, abstractmethod from typing import List, Sequence, Union, Dict, Callable, Any, Optional, Set", "fit(self, df: pd.DataFrame): pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT:", "len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply to any columns, transformer has no", "def __init__(self, keep: Set = None, drop: Set = None): super().__init__() self.drop =", "# fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1: raise", "Otherwise, use a factory. 
:param transformerFactory: a factory for the generation of the", "column the rule applies to :return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip,", "df def info(self): info = super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop return", "class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data frame transformers, i.e. objects which", "flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols] lengths = [len(a) for a in", "column in self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if", "= d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ =", "df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued case is only supported", "d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\",", "the value is in the setToKeep \"\"\" def __init__(self, column: str, setToKeep: Set):", "info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters columns by", "__setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]:", "class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation", "Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip: flag indicating 
whether no transformation", "Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical variables", "the columns :param unsupported: flag indicating whether normalisation of all columns is unsupported", "length as columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col,", "transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self) ->", "rules; rules are always fitted and applied in the given order :param defaultTransformerFactory:", "aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation def _apply(self, df: pd.DataFrame)", "rule/rule template (disabling `fit` where appropriate). Otherwise, use a factory. :param transformerFactory: a", "learned for each of the columns for the case where the rule matches", "OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def", "a chain of data frame transformers. During fit and apply each transformer in", "return df def info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace", "retaining or dropping specified columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]] = None,", "be normalised in the same way. 
If arrayValued is True, only a single", "= super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return info def findFirstTransformerByType(self, cls)", "in df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for row in df.values] for iCol,", "transformer {self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return", "in columns if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param", "class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals = decimals def _apply(self, df: pd.DataFrame)", "def findRule(self, colName: str) -> \"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName): return", "None: if type(categories) == dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col,", "transformed. 
:param requireAllHandled: whether to raise an exception if not all columns are", "not handled by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self, df:", "apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot", "info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft, cls):", "is not None): raise ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex = re.compile(regex)", "self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the set", "which the value is not in the setToDrop \"\"\" def __init__(self, column: str,", "defining the column(s) the rule applies to. If it applies to multiple columns,", "= keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep", "True, only a single matching column is supported, i.e. the regex must match", "df = df.copy() if self.keep is not None: df = df.loc[self.keep] if self.drop", "= [keep] if type(keep) == str else keep self.drop = drop def _apply(self,", "not compile regex '{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns", "a function that takes a Numpy array and from which the returned value", "drop: Set = None): super().__init__() self.drop = drop self.keep = keep def _apply(self,", "values of all arrays within a column (which may vary in length) are", "a rule for all columns matching the regex :param regex: a regular expression", "single matching column is supported, i.e. 
the regex must match at most one", "a condition function to each row and retaining only the rows for which", "pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers: df = transformer.apply(df) return df def", "each of the columns for the case where the rule matches multiple columns.", "may be a default name if the name has not been set. \"\"\"", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() for", "= values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values else: values = applicableDF.values.flatten() values", "df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame", "def setRegex(self, regex: str): self.regex = re.compile(regex) def matches(self, column: str): if self.regex", "d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame): if", "[col for col in columns if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True,", "Union[str, Sequence[str]] = None): super().__init__() self.keep = [keep] if type(keep) == str else", "the matching column(s) :param unsupported: flag indicating whether normalisation of the matching column(s)", "self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df, True) def info(self): info = super().info()", "if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays", "column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform)", "Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping old column names to new names", "applying the transformation to the original 
data frame - in-place transformation). A data", "-> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by", "use (which will be fitted & applied) :param columns: the set of column", "where appropriate). Otherwise, use a factory. :param transformerFactory: a factory for the generation", "def getName(self) -> str: \"\"\" :return: the name of this dft transformer, which", "None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical variables :param columns: list", "info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return info def", "column to be modified :param columnTransform: a function operating on single cells or", "normalisation of all columns is unsupported (shall trigger an exception if attempted) :param", "Dict[str, Any]: d = super()._toStringAdditionalEntries() if self._rules is not None: d[\"rules\"] = self._rules", "self.inplace = inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def", "import copy import logging import re from abc import ABC, abstractmethod from typing", "info = super().info() info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a", "flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"]", "lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols) ==", "= self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, 
columnForAggregation: str, aggregation: Callable): super().__init__()", "names to new names \"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame)", "import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for", "Series \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame)", "__init__(self, column: str, condition: Callable[[Any], bool]): super().__init__() self.column = column self.condition = condition", "for application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except", "None): super().__init__() self.drop = drop self.keep = keep def _apply(self, df: pd.DataFrame) ->", "is encountered during transform, the resulting one-hot encoded columns for this feature will", "df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer):", "frame transformers, i.e. objects which can transform one data frame into another (possibly", "fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def", "are actually to be one-hot-encoded :param categories: numpy arrays containing the possible values", "encountered during transform, the resulting one-hot encoded columns for this feature will be", "& applied) :param columns: the set of column names to which the transformation", "the values of all arrays within a column (which may vary in length)", "pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified", "name. If None, the possible values will be inferred from the columns :param", "e.g. 
StandardScaler) to apply to the matching column(s) for the case where a", "info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by", "set later via setRegex or the rule will not be applicable. :param skip:", "super().__init__() self.decimals = decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals),", "pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which", "col in cols] lengths = [len(a) for a in flatColArrays] if len(set(lengths)) !=", "Dict, Callable, Any, Optional, Set import numpy as np import pandas as pd", "use the same feature with associated rule/rule template (disabling `fit` where appropriate). Otherwise,", "unless independentColumns=True. If None, the rule is a placeholder rule and the regex", "return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None, drop: Set =", "which can transform one data frame into another (possibly applying the transformation to", "= None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a regular expression defining the", "None, apply it to all columns :param inplace: whether to apply the transformation", "want to make use of these columns, transform them into a supported column", "will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param arrayValued: whether the", "-> pd.DataFrame: if not self.inplace: df = df.copy() for cg in self.columnGenerators: series", "DFTs based on code prior to commit 7088cbbe # They lack the __isFitted", "for col in columns if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False):", "inplace self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self,", "of a chain of data frame transformers. During fit and apply each transformer", "= \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame)", "if len(self._columnsToEncode) == 0: return df if not self.inplace: df = df.copy() for", "of the matching column(s) is unsupported (shall trigger an exception if attempted) :param", "None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a regular expression defining the column(s)", "copy import logging import re from abc import ABC, abstractmethod from typing import", "df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column", "None else None self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory", "return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None,", "SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker", "each transformer in the chain receives the transformed output of its predecessor. 
\"\"\"", "None, the possible values will be inferred from the columns :param inplace: whether", "no columns\") # collect specialised rule for application specialisedRule = copy.copy(rule) r =", "given order :param defaultTransformerFactory: a factory for the creation of transformer instances (from", "class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a vectorized condition on", "ValueError(f\"Columns {cols} do not contain the same number of values: {lengths}\") values =", "to perform the transformation in-place :param ignoreUnknown: if True and an unknown category", "info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation", "toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol", "info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a", "is not None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise Exception(f\"No", "rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if", "pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"] = self.column", "len(cols) == 1: c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x in", "be transformed. 
:param requireAllHandled: whether to raise an exception if not all columns", "\"\"\" def __init__(self, column: str, condition: Callable[[Any], bool]): super().__init__() self.column = column self.condition", "requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return { \"name\":", "setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame): cols = self.columns if cols is", "shall be used to create a transformer for all rules that don't specify", "unmatched columns will not be transformed. :param requireAllHandled: whether to raise an exception", "\"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose logic is", "self.arrayValued = arrayValued self.fit = fit self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule,", "It is assumed that all entries in such arrays are to be normalised", "cols is None: cols = df.columns if not self.arrayValued: values = df[cols].values else:", "pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep is not None: df =", "0: return for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return", "Numpy array and from which the returned value will be assigned to the", "of) a data frame. If multiple columns are transformed, they are transformed independently", "of all arrays within a column (which may vary in length) are to", "sklearn.preprocessing, e.g. StandardScaler) to apply to the matching column(s) for the case where", "trained transformation). 
\"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False):", "== 0: log.warning(f\"{self} does not apply to any columns, transformer has no effect;", "== 0: return df if not self.inplace: df = df.copy() for columnName in", "pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info =", "we assume that each such DFT was fitted def __setstate__(self, d): d[\"_name\"] =", "want, in particular, the instance to be shared across several models that use", "of columns {matchingColumns} is unsupported according to {rule}. If you want to make", "columns\") # collect specialised rule for application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns)", "DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing to (a subset of the columns", "self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC):", "self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) -> Dict[str,", "separate transformation is to be learned for each of the columns for the", "df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for transformer in self.dataFrameTransformers[:-1]: df =", "__isFitted attribute and we assume that each such DFT was fitted def __setstate__(self,", "matched by such rules, unmatched columns will not be transformed. :param requireAllHandled: whether", "matches multiple columns. 
\"\"\" if skip and transformer is not None: raise ValueError(\"skip==True", "columns self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace =", "need to be fitted to data\"\"\" def _fit(self, df: pd.DataFrame): pass def fit(self,", "not need to be fitted to data\"\"\" def _fit(self, df: pd.DataFrame): pass def", "rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d", "= [c for c in df.columns if re.fullmatch(self._columnNameRegex, c) is not None] if", "transformation shall be performed on all of the columns :param unsupported: flag indicating", "@abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\"", "of columns that are to be replaced by a list one-hot encoded columns", "performance optimisation. \"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\"", "c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]] else: transformedValues", "Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the name of the column to be", "rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1:", "info = super().info() info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a", "assigned to the column as a whole \"\"\" super().__init__() self.column = column self.columnTransform", "by applying a vectorized condition on the selected column and retaining only the", "column and retains only the rows for which the value is in the", "encode categorical variables :param columns: list of names or regex matching names of", "DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: 
Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace", "self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\" if categories is", "\"\"\" Supports the application of a chain of data frame transformers. During fit", "[c for c in df.columns if re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode)", "self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if", "if self._columnChangeTracker is not None else None, \"isFitted\": self.isFitted(), } def fit(self, df:", "normalisation of the matching column(s) is unsupported (shall trigger an exception if attempted)", "arrays within a column (which may vary in length) are to be transformed", "transformation not to scalar-valued columns but to one or more array-valued columns, where", "During fit and apply each transformer in the chain receives the transformed output", "ColumnGenerator from ..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate", "pd.DataFrame, inverse: bool) -> pd.DataFrame: if not self.inplace: df = df.copy() cols =", "\"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self):", "False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame): cols = self.columns if cols", "instances (from sklearn.preprocessing, e.g. 
StandardScaler) that shall be used to create a transformer", "df.copy() for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df =", "to the matching column(s) for the case where a transformation is necessary (skip=False,", "ValueError(f\"Given categories must have the same length as columns to process\") self.oneHotEncoders =", "exception if not all columns are matched by a rule :param inplace: whether", "be used. NOTE: Use an instance only if you want, in particular, the", "possible values of each of the specified columns (for case where sequence is", "which it applies (learning a single transformer based on the values of all", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() matchedRulesByColumn =", "another (possibly applying the transformation to the original data frame - in-place transformation).", "Exception(f\"Array-valued case is only supported for a single column, matched {matchingColumns} for {rule}\")", "else: transformedValues = [transform(np.stack(row, axis=1)) for row in df.values] for iCol, col in", "len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers: df =", "= super().info() info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new", "can be used to utilise Numpy vectorisation for performance optimisation. 
\"\"\" def __init__(self,", "= values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols] lengths =", "of all applicable columns) \"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol", "df.copy() matchedRulesByColumn = {} for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns)", "compile regex '{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns =", "columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self,", "d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame): if self._columnsToEncode is None:", "Set): super().__init__() self.setToKeep = setToKeep self.column = column def _apply(self, df: pd.DataFrame) ->", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info", "def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the name of", "else: if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else:", "{e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys())", "self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self) -> List[str]:", "a rule :param inplace: whether to apply data frame transformations in-place \"\"\" super().__init__()", "not be transformed. 
:param requireAllHandled: whether to raise an exception if not all", "index=df.index) def info(self): info = super().info() info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer):", "def info(self): info = super().info() info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\"", "given; if neither `transformer` nor `transformerInstance` are given, the containing instance's default factory", "in the setToKeep \"\"\" def __init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep =", "orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data", "categories.items()} else: if len(columns) != len(categories): raise ValueError(f\"Given categories must have the same", "= df.copy() for cg in self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series return", "info = super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return", "# arguments passed to init that are not saved otherwise can be persisted", "if not rule.skip: if rule.independentColumns and not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] =", "all zeros. if False, an unknown category will raise an error. :param arrayValuedResult:", "default transformer will only be applied to columns matched by such rules, unmatched", "import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util import", "= list(encodedArray) return df def info(self): info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"]", "for each column) unless independentColumns=True. 
If None, the rule is a placeholder rule", "(which may vary in length) are to be transformed in the same way.", "arrayValuedResult=False): \"\"\" One hot encode categorical variables :param columns: list of names or", "for the creation of transformer instances (from sklearn.preprocessing, e.g. StandardScaler) that shall be", "\"\"\" Base class for data frame transformers, i.e. objects which can transform one", "categories in categories.items()} else: if len(columns) != len(categories): raise ValueError(f\"Given categories must have", "column self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class", "def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) ->", "-> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"] =", "length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace = inplace", "np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols]", "DataFrameTransformer which is not fitted: \" f\"the df transformer {self.getName()} requires fitting\") df", "for convenient construction options. :param independentColumns: whether a separate transformation is to be", "if cols is None: cols = df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if", "vectorisation for performance optimisation. 
\"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series,", "self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def", "(skip=False, unsupported=False). If None is given, either transformerFactory or the containing instance's default", "= super().info() info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer", "here # for backwards compatibility with persisted DFTs based on code prior to", "(columnName, i)] = encodedArray[:, i] else: df[columnName] = list(encodedArray) return df def info(self):", ":param regex: a regular expression defining the column(s) the rule applies to. If", "return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals = decimals def _apply(self,", "to (a subset of the columns of) a data frame. If multiple columns", "class for data frame transformers, i.e. 
objects which can transform one data frame", "..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string", "not self.inplace: df = df.copy() cols = self.columns if cols is None: cols", "super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return", "None): super().__init__() self.keep = [keep] if type(keep) == str else keep self.drop =", "Filters a data frame by applying a boolean function to one of the", "each of the specified columns (for case where sequence is specified in 'columns')", "= None, drop: Set = None): super().__init__() self.drop = drop self.keep = keep", "set of columns to which it applies (learning a single transformer based on", "df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for transformer in", "d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"]", "= np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in", "= {} for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) == 0:", "a column specified by 'column' using 'columnTransform'. 
This transformer can be used to", "values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool)", "info(self): info = super().info() info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters", "df: pd.DataFrame): pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer):", "self.regex = re.compile(regex) if regex is not None else None self.skip = skip", "df.copy() if self.keep is not None: df = df[self.keep] if self.drop is not", "applies to. If it applies to multiple columns, these columns will be normalised", "to the original data frame - in-place transformation). A data frame transformer may", "for data frame transformers, i.e. objects which can transform one data frame into", "in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return: the list of names of", "self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None))", "else None, \"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True", "columns are transformed, then the arrays belonging to a single row must all", "raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported according to {rule}. 
If you want", "specified in 'columns') or dictionary mapping column name to array of possible categories", "the returned value will be assigned to the column as a whole \"\"\"", "for the generation of the transformer instance, which will only be applied if", "If None, the possible values will be inferred from the columns :param inplace:", "columnsMap: dictionary mapping old column names to new names \"\"\" super().__init__() self.columnsMap =", "factory will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. :param independentColumns: whether", "be shared across several models that use the same feature with associated rule/rule", "\"\"\" :param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use (which will be", "transformedValues] return df def _apply(self, df): return self._apply_transformer(df, False) def applyInverse(self, df): return", "self.columnForAggregation = columnForAggregation self.aggregation = aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "if not self.arrayValuedResult: df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName,", "{lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool) ->", "columns. 
\"\"\" if skip and (transformer is not None or transformerFactory is not", "one-hot-encoded :param categories: numpy arrays containing the possible values of each of the", "return True def getName(self) -> str: \"\"\" :return: the name of this dft", "applied in the given order :param defaultTransformerFactory: a factory for the creation of", "log.log(logging.DEBUG - 1, f\"{rule} matched no columns\") # collect specialised rule for application", "getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose (forward) transformation is the inverse", "= orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown =", "self.aggregation = aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer):", "transformation). \"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\"", "to fit: {rule} defines no transformer and instance has no transformer factory\") rule.transformer", "= None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self._rules is", "df def info(self): info = super().info() info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer):", "super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) ->", "pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info = super().info() info[\"decimals\"] = self.decimals return", "whose logic is entirely based on rules and does not need to be", "_toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if 
self._rules is not None: d[\"rules\"]", "True \"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column", "replaced by a list one-hot encoded columns each (or an array-valued column for", "self._defaultTransformerFactory is None: raise Exception(f\"No transformer to fit: {rule} defines no transformer and", "subset of the columns of) a data frame. If multiple columns are transformed,", "such rules, unmatched columns will not be transformed. :param requireAllHandled: whether to raise", "for c in matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip: if rule.independentColumns and", "matchedRulesByColumn = {} for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) ==", "construction options. :param independentColumns: whether a separate transformation is to be learned for", "self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a", "{rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values", "columns but to one or more array-valued columns, where the values of all", "in matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0]", "setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column = column def _apply(self, df: pd.DataFrame)", "def fit(self, df: pd.DataFrame): pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self,", "OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util import flattenArguments", "not None or transformerFactory is not None): raise ValueError(\"skip==True while transformer/transformerFactory is 
not", "str: self._columnNameRegex = columns self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode =", "import ABC, abstractmethod from typing import List, Sequence, Union, Dict, Callable, Any, Optional,", "encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]):", "info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "in-place transformation). A data frame transformer may require being fitted using training data.", "arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self,", "the name has not been set. \"\"\" return self._name def setName(self, name): self._name", "\"\"\" Modifies a column specified by 'column' using 'columnTransform'. This transformer can be", "transformerFactory is not None): raise ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex =", "-> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info", "self.columns if cols is None: cols = df.columns if not self.arrayValued: values =", "= super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\"", "to {rule}. 
If you want to make use of these columns, transform them", "SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance", "return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose logic is entirely", "re.compile(r) except Exception as e: raise Exception(f\"Could not compile regex '{r}': {e}\") self._rules.append(specialisedRule)", "\"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None", "template to a rule for all columns matching the regex :param regex: a", "Exception(f\"Normalisation of columns {matchingColumns} is unsupported according to {rule}. If you want to", "transformation rules, where each rule defines a set of columns to which it", "numpy arrays containing the possible values of each of the specified columns (for", "= [len(a) for a in flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols}", "= self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\"", "c) is not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply to", "Sorts a data frame's columns in ascending order \"\"\" def _apply(self, df: pd.DataFrame)", "return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a chain of data", "rule and the regex must be set later via setRegex or the rule", "None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping", "raise Exception(f\"Could not compile regex '{r}': {e}\") self._rules.append(specialisedRule) def 
_checkUnhandledColumns(self, df, matchedRulesByColumn): if", "\"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]:", "will only be applied if `transformer` is not given; if neither `transformer` nor", "class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol]", "df = df.copy() for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult:", "self._rules else: d[\"userRules\"] = self._userRules return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn =", "requireAllHandled: whether to raise an exception if not all columns are matched by", "at most one column. :param fit: whether the rule's transformer shall be fitted", "process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)}", "regex: str): self.regex = re.compile(regex) def matches(self, column: str): if self.regex is None:", "or dropping specified columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]] = None, drop:", ":param columns: list of names or regex matching names of columns that are", "values = values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values else: values = applicableDF.values.flatten()", "List[str]: \"\"\" :return: the list of names of all contained feature generators \"\"\"", "-> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self,", "if self._rules is not None: d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules return", "before applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer is None: if 
rule.transformerFactory is", "columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else", "len(columns) != len(categories): raise ValueError(f\"Given categories must have the same length as columns", "instance's default factory will be used. NOTE: Use an instance only if you", "categories)} def __setstate__(self, state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state)", "df: pd.DataFrame): if len(self._columnsToEncode) == 0: return df if not self.inplace: df =", "rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported according to {rule}. If you", "None, then no columns are actually to be one-hot-encoded :param categories: numpy arrays", "cols] lengths = [len(a) for a in flatColArrays] if len(set(lengths)) != 1: raise", "\"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition = condition def _apply(self, df:", "modified :param columnTransform: a function that takes a Numpy array and from which", "self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols) == 1: c = cols[0] df[c]", "supported column before applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer is None: if", "def info(self): info = super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return info", "default factory will be used. NOTE: Use an instance only if you want,", "don't specify a particular transformer. The default transformer will only be applied to", "column: the name of the column to be modified :param columnTransform: a function", "(of arbitrary lengths). 
It is assumed that all entries in such arrays are", "the value is not in the setToDrop \"\"\" def __init__(self, column: str, setToDrop:", "defining the column the rule applies to :return: the resulting Rule \"\"\" return", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a", "more array-valued columns, where the values of all arrays within a column (which", "generator from which the rule originated was never applied in order to have", "{self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return {", "= self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals = decimals def _apply(self, df:", "as pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import", "import setstate from ..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin):", "objects which can transform one data frame into another (possibly applying the transformation", "from typing import List, Sequence, Union, Dict, Callable, Any, Optional, Set import numpy", "values = applicableDF.values else: values = applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else:", "If it applies to multiple columns, these columns will be normalised in the", "len(self.dataFrameTransformers) == 0: return for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def", "be assigned to the column as a whole \"\"\" 
super().__init__() self.column = column", "self.arrayValued: values = df[cols].values else: if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values", "= None, drop: Union[str, Sequence[str]] = None): super().__init__() self.keep = [keep] if type(keep)", "if type(categories) == dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories", "pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a chain of", "import re from abc import ABC, abstractmethod from typing import List, Sequence, Union,", "which the returned value will be assigned to the column as a whole", "create a transformer for all rules that don't specify a particular transformer. The", "data\"\"\" def _fit(self, df: pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass def isFitted(self):", "= df.drop(columns=self.drop) return df def info(self): info = super().info() info[\"keep\"] = self.keep info[\"drop\"]", "invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "= [row[:, iCol] for row in transformedValues] return df def _apply(self, df): return", "data frame - in-place transformation). 
A data frame transformer may require being fitted", "an entire Series \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self,", "rule in self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator],", "self.column = column self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "one or more array-valued columns, where the values of all arrays within a", "the containing instance's default factory will be used. See `SkLearnTransformerFactoryFactory` for convenient construction", "pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]],", "arrays are to be normalised in the same way. If arrayValued is True,", "= self.getNames() info[\"length\"] = len(self) return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for", "transformed, then the arrays belonging to a single row must all have the", "name): self._name = name @abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self,", "d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d def _toStringExcludePrivate(self) -> bool:", "self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d =", "specified columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]]", "you want, in particular, the instance to be shared across several models that", "which is not fitted: \" f\"the df transformer {self.getName()} requires fitting\") df =", "Use an instance only if you want, in particular, the instance to be", "findRule(self, colName: str) -> 
\"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName): return rule", "make use of these columns, transform them into a supported column before applying", "= columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount:", "to a data frame by applying a set of transformation rules, where each", "df: pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self): return self._isFitted def fitApply(self, df:", "= self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\"", "None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult =", "None: df = df.loc[self.keep] if self.drop is not None: df = df.drop(self.drop) return", "\"$\" elif type(columns) == str: self._columnNameRegex = columns self._columnsToEncode = None else: self._columnNameRegex", "rule.independentColumns and not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c", "of possible categories for the column name. If None, the possible values will", "factory will be used. NOTE: Use an instance only if you want, in", "for the column name. 
If None, the possible values will be inferred from", "info(self): info = super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop return info class", "a set of transformation rules, where each rule defines a set of columns", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self):", "unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex:", "rule will not be applicable. :param skip: flag indicating whether no transformation shall", "columns are actually to be one-hot-encoded :param categories: numpy arrays containing the possible", "np.ufunc]): \"\"\" :param column: the name of the column to be modified :param", "self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\"", "iCol, col in enumerate(cols): df[col] = [row[:, iCol] for row in transformedValues] return", "regex must be set later via setRegex or the rule will not be", "def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer in", "the same way. If multiple columns are transformed, then the arrays belonging to", "rules are always fitted and applied in the given order :param defaultTransformerFactory: a", "chain receives the transformed output of its predecessor. 
\"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer,", "self._rules is not None: d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules return d", "# for backwards compatibility with persisted DFTs based on code prior to commit", "specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception as", "df def info(self): info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"]", "old column names to new names \"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self,", "set of column names to which the transformation shall apply; if None, apply", "self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer):", "a function operating on single cells or a Numpy ufunc that applies to", "each row and retaining only the rows for which it returns True \"\"\"", "and an unknown category is encountered during transform, the resulting one-hot encoded columns", "self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with counts of", "self._rules = [] for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in", "a transformer whose (forward) transformation is the inverse transformation of this DFT \"\"\"", "no columns are actually to be one-hot-encoded :param categories: numpy arrays containing the", "d def _toStringExcludePrivate(self) -> bool: return True def getName(self) -> str: \"\"\" :return:", "\"\"\" Applies normalisation/scaling to a data frame by applying a set of transformation", "self.inplace: df = df.copy() for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not", "info[\"setToKeep\"] = self.setToKeep return info 
class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on", "columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] =", "self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} # arguments passed to init that", "regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None,", "fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self)", "if regex is not None else None self.skip = skip self.unsupported = unsupported", "utilise Numpy vectorisation for performance optimisation. \"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray],", "df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled", "columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class", "return self._apply_transformer(df, True) def info(self): info = super().info() info[\"columns\"] = self.columns info[\"inplace\"] =", "self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules", "not None: if type(categories) == dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for", "these columns will be normalised in the same way (using the same normalisation", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, 
decimals=0):", "df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set =", "return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators", "transform them into a supported column before applying {self.__class__.__name__}.\") if not rule.skip: if", "as a whole \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self,", "`SkLearnTransformerFactoryFactory` for convenient construction options. :param arrayValued: whether the column values are not", "the same length as columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown)", "None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply to any columns, transformer", "import List, Sequence, Union, Dict, Callable, Any, Optional, Set import numpy as np", "import logging import re from abc import ABC, abstractmethod from typing import List,", "abc import ABC, abstractmethod from typing import List, Sequence, Union, Dict, Callable, Any,", "matchingColumns(self, columns: Sequence[str]): return [col for col in columns if self.matches(col)] def __init__(self,", "of data frame transformers. During fit and apply each transformer in the chain", "info = super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return info def findRule(self,", "if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str,", "to use (which will be fitted & applied) :param columns: the set of", "case where a transformation is necessary (skip=False, unsupported=False). 
If None is given, either", "-> pd.DataFrame: df = df.copy() if self.keep is not None: df = df[self.keep]", "for cg in self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series return df def", "list one-hot encoded columns each (or an array-valued column for the case where", "= f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str): self.regex = re.compile(regex) def matches(self,", "= sklearnTransformer self.columns = columns self.inplace = inplace self.arrayValued = arrayValued def __setstate__(self,", "A data frame transformer may require being fitted using training data. \"\"\" def", "arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\" if categories is not None:", "not self.arrayValuedResult: df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)]", "the regex must be set later via setRegex or the rule will not", "df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of", "return df def info(self): info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown", "== 1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten())", "or a Numpy ufunc that applies to an entire Series \"\"\" super().__init__() self.column", "function operating on single cells or a Numpy ufunc that applies to an", "-> pd.DataFrame: for transformer in self.dataFrameTransformers: df = transformer.apply(df) return df def _fit(self,", "the values on a selected column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str", "= columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series =", "in transformedValues] return df def 
_apply(self, df): return self._apply_transformer(df, False) def applyInverse(self, df):", "be used to create a transformer for all rules that don't specify a", "necessary (skip=False, unsupported=False). If None is given, either transformerFactory or the containing instance's", "def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns)", "rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for c in matchingColumns: matchedRulesByColumn[c] = rule", "have the same length as columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False,", "ABC): \"\"\"Base class for transformers whose logic is entirely based on rules and", "columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the name of the column", "str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the name of the", "normalisation process for each column) unless independentColumns=True. If None, the rule is a", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info()", "_toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"]", "raise Exception(f\"Cannot apply a DataFrameTransformer which is not fitted: \" f\"the df transformer", "cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]] else: transformedValues = [transform(np.stack(row,", "the columns of) a data frame. 
If multiple columns are transformed, they are", "of each of the specified columns (for case where sequence is specified in", "self.keep is not None: df = df.loc[self.keep] if self.drop is not None: df", "contain the same number of values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def", "DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data frame by applying a set of", "rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued:", "== 0: continue for c in matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip:", "setToKeep self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def", "= columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df", "of this dft transformer, which may be a default name if the name", "which may be a default name if the name has not been set.", "logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data frame transformers, i.e. 
objects", "arrayValuedResult: whether to replace the input columns by columns of the same name", "return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft,", "only the rows for which the function returns True \"\"\" def __init__(self, column:", "if categories is not None: if type(categories) == dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)],", "len(self._columnsToEncode) == 0: return df if not self.inplace: df = df.copy() for columnName", "fit self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False,", "if not rule.skip: if rule.transformer is None: if rule.transformerFactory is not None: rule.transformer", "= df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued case is only", "the rows for which the value is not in the setToDrop \"\"\" def", "self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\" if categories is not None: if", "in the given order :param defaultTransformerFactory: a factory for the creation of transformer", "\"\"\" One hot encode categorical variables :param columns: list of names or regex", "f\"the df transformer {self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def", "log.warning(f\"{self} does not apply to any columns, transformer has no effect; regex='{self._columnNameRegex}'\") if", "order to have the rule instantiated.\") return self.regex.fullmatch(column) is not None def matchingColumns(self,", "a separate transformation is to be learned for each of the columns for", "= df.copy() if self.keep is not None: df = df[self.keep] if self.drop is", "apply data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled 
self.inplace = inplace", "= rule if not rule.skip: if rule.independentColumns and not rule.arrayValued: matchingColumns = sorted(matchingColumns)", "None: df = df[self.keep] if self.drop is not None: df = df.drop(columns=self.drop) return", "scalar-valued columns but to one or more array-valued columns, where the values of", "..columngen import ColumnGenerator from ..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle", "\"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace = inplace self.arrayValued", "self.drop is not None: df = df.drop(columns=self.drop) return df def info(self): info =", "Numpy ufunc that applies to an entire Series \"\"\" super().__init__() self.column = column", "-> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToKeep\"]", "None: raise ValueError(\"skip==True while transformer is not None\") self.skip = skip self.unsupported =", "if len(cols) == 1: c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x", "particular transformer. The default transformer will only be applied to columns matched by", "return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column", "or regex matching names of columns that are to be replaced by a", "for x in df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for row in df.values]", "require being fitted using training data. 
\"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted", "== 0: return for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self):", "feature generators \"\"\" return [transf.getName() for transf in self.dataFrameTransformers] def info(self): info =", "False, an unknown category will raise an error. :param arrayValuedResult: whether to replace", "to an entire Series \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def", "len(categories): raise ValueError(f\"Given categories must have the same length as columns to process\")", "None, independentColumns=False): \"\"\" :param skip: flag indicating whether no transformation shall be performed", "else: values = applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1,", "not rule.skip: if rule.independentColumns and not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values)", "df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"] =", "in self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self)", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"]", "categories in zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"]", "pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column'", "f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str): self.regex = 
re.compile(regex) def matches(self, column:", "for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns: if c", "= None, independentColumns=False): \"\"\" :param skip: flag indicating whether no transformation shall be", "entirely based on rules and does not need to be fitted to data\"\"\"", "data frame. If multiple columns are transformed, they are transformed independently (i.e. each", "class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def", "_fit(self, df: pd.DataFrame): cols = self.columns if cols is None: cols = df.columns", "the setToKeep \"\"\" def __init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep", "axis=1)) for row in df.values] for iCol, col in enumerate(cols): df[col] = [row[:,", "= state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame): cols = self.columns", "-> List[str]: \"\"\" :return: the list of names of all contained feature generators", "is not None: df = df.drop(columns=self.drop) return df def info(self): info = super().info()", "= independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def", "an exception if not all columns are matched by a rule :param inplace:", "= [transform(np.array([x]).T)[:, 0] for x in df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for", "df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] self._checkUnhandledColumns(df,", "Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the name of the column to", "def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a 
transformer whose (forward) transformation is the", "from abc import ABC, abstractmethod from typing import List, Sequence, Union, Dict, Callable,", "multiple columns. \"\"\" if skip and (transformer is not None or transformerFactory is", "{column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]])", "Supports the application of a chain of data frame transformers. During fit and", "__init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the name", "a data frame. If multiple columns are transformed, they are transformed independently (i.e.", "possible values will be inferred from the columns :param inplace: whether to perform", "info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A", "self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__()", "self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None, \"isFitted\": self.isFitted(), } def fit(self,", "the transformation in-place :param ignoreUnknown: if True and an unknown category is encountered", "application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception", "if `transformer` is not given; if neither `transformer` nor `transformerInstance` are given, the", "self.drop is not None: df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies", "d.get(\"_paramInfo\", {}) self.__dict__ = d def 
_toStringExcludePrivate(self) -> bool: return True def getName(self)", "be a default name if the name has not been set. \"\"\" return", "array-valued column for the case where useArrayValues=True); If None, then no columns are", "= df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i]", "else: df[columnName] = list(encodedArray) return df def info(self): info = super().info() info[\"inplace\"] =", "if self.drop is not None: df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\"", "__init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] =", "pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info = super().info()", "categories must have the same length as columns to process\") self.oneHotEncoders = {col:", "raise ValueError(f\"Columns {cols} do not contain the same number of values: {lengths}\") values", "each such DFT was fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"]", "filters columns by retaining or dropping specified columns \"\"\" def __init__(self, keep: Union[str,", "pass @abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def apply(self, df: pd.DataFrame)", "len(matchingColumns) > 1: raise Exception(f\"Array-valued case is only supported for a single column,", "the rule applies to :return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported,", "arrayValued is True, only a single matching column is supported, i.e. 
the regex", "isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class", "in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft in", "match at most one column. :param fit: whether the rule's transformer shall be", "operating on single cells or a Numpy ufunc that applies to an entire", "if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators", "names to which the transformation shall apply; if None, apply it to all", "info(self): info = super().info() info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds", "unknown category is encountered during transform, the resulting one-hot encoded columns for this", "pd.DataFrame: if not self.inplace: df = df.copy() cols = self.columns if cols is", "on rules and does not need to be fitted to data\"\"\" def _fit(self,", "super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters", "= applicableDF.values else: values = applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG", "the input columns by columns of the same name containing arrays as values", "is the inverse transformation of this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC):", "rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise Exception(f\"No transformer to fit:", "- in-place transformation). 
A data frame transformer may require being fitted using training", "def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo", "Filters a data frame by applying a condition function to each row and", "= applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} matched", "{matchingColumns} is unsupported according to {rule}. If you want to make use of", "function returns True \"\"\" def __init__(self, column: str, condition: Callable[[Any], bool]): super().__init__() self.column", "if self.regex is None: raise Exception(\"Attempted to apply a placeholder rule. Perhaps the", "the transformer instance, which will only be applied if `transformer` is not given;", "shared across several models that use the same feature with associated rule/rule template", "values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values else:", "associated rule/rule template (disabling `fit` where appropriate). Otherwise, use a factory. 
:param transformerFactory:", "{unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not", "on single cells or a Numpy ufunc that applies to an entire Series", "columns, where the values of all arrays within a column (which may vary", "None, \"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True def", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"]", "transformer shall be fitted :param independentColumns: whether a separate transformation is to be", "__init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False):", "no transformation shall be performed on all of the columns :param unsupported: flag", "for backwards compatibility with persisted DFTs based on code prior to commit 7088cbbe", ":return: the list of names of all contained feature generators \"\"\" return [transf.getName()", "pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame)", "class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a condition function to", "and applied in the given order :param defaultTransformerFactory: a factory for the creation", "counts of the values on a selected column \"\"\" def __init__(self, columnForEntryCount: str,", "a single matching column is supported, i.e. 
the regex must match at most", "separate column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories", "class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform' \"\"\" def", "(using the same normalisation process for each column) unless independentColumns=True. If None, the", "matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if not", "the rows for which the function returns True \"\"\" def __init__(self, column: str,", "of the specified columns (for case where sequence is specified in 'columns') or", "column name. If None, the possible values will be inferred from the columns", "name of this dft transformer, which may be a default name if the", "for a in flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do not", "same name containing arrays as values instead of creating a separate column per", "persisted here # for backwards compatibility with persisted DFTs based on code prior", "None, drop: Union[str, Sequence[str]] = None): super().__init__() self.keep = [keep] if type(keep) ==", "self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols) == 1: c", "a data frame by applying a set of transformation rules, where each rule", "bool) -> pd.DataFrame: if not self.inplace: df = df.copy() cols = self.columns if", "rule.matchingColumns(df.columns) for c in matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More than one", "OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()} else: if len(columns) != len(categories):", "then no columns are actually to be one-hot-encoded :param categories: numpy arrays containing", "df = df.copy() for cg in 
self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series", "\"\"\" :param column: the name of the column to be modified :param columnTransform:", "= rule.matchingColumns(df.columns) for c in matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More than", "info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]:", "whether to replace the input columns by columns of the same name containing", "= df.columns if not self.arrayValued: values = df[cols].values else: if len(cols) == 1:", "this dft transformer, which may be a default name if the name has", "info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class", "inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use (which", "List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries()", "np.ndarray]]): \"\"\" :param column: the name of the column to be modified :param", "factory for the generation of the transformer instance, which will only be applied", "\"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition", "sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use (which will be fitted &", "if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply to any columns, transformer has", "See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param independentColumns: whether a separate transformation is", "= self.column info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data", "the rule is a placeholder rule and the regex must be set later", "a in flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do not contain", "all columns is unsupported (shall trigger an exception if attempted) :param transformer: a", "are to be normalised in the same way. If arrayValued is True, only", "data frame by applying a set of transformation rules, where each rule defines", ":param transformer: a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to the", "that shall be used to create a transformer for all rules that don't", "class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing to (a subset of the", "None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise Exception(f\"No transformer to", "def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame) ->", "One hot encode categorical variables :param columns: list of names or regex matching", "(transformer is not None or transformerFactory is not None): raise ValueError(\"skip==True while transformer/transformerFactory", "can be persisted here # for backwards compatibility with persisted DFTs based on", "from ..util.pickle import setstate from ..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class", "str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition def _apply(self,", "Any]: d = super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" 
return", "self.keep is not None: df = df[self.keep] if self.drop is not None: df", "return [col for col in columns if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None,", "condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\"", "'{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) -", "state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame): cols = self.columns if", "__init__(self, keep: Set = None, drop: Set = None): super().__init__() self.drop = drop", "= True def isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df)", "-> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column", "def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer:", "only a single matching column is supported, i.e. the regex must match at", "where the rule matches multiple columns. 
\"\"\" if skip and (transformer is not", "{matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation", "sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\"", "== str else keep self.drop = drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "= cg.generateColumn(df) df[series.name] = series return df def info(self): info = super().info() info[\"inplace\"]", "\"\"\" if skip and transformer is not None: raise ValueError(\"skip==True while transformer is", "transformer from sklearn.preprocessing to (a subset of the columns of) a data frame.", "def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if not self.inplace: df =", "on the selected column and retaining only the rows for which it returns", "= re.compile(r) except Exception as e: raise Exception(f\"Could not compile regex '{r}': {e}\")", "pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToDrop\"] =", "per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is None", "(shall trigger an exception if attempted) :param transformer: a transformer instance (from sklearn.preprocessing,", "transformation is necessary (skip=False, unsupported=False). 
If None is given, either transformerFactory or the", "that applies to an entire Series \"\"\" super().__init__() self.column = column self.columnTransform =", "transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip: flag indicating whether no", "c in df.columns if re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode) == 0:", "StandardScaler) to apply to the matching column(s) for the case where a transformation", "values of each of the specified columns (for case where sequence is specified", "d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d", "no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown)", "column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info =", "flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string import orRegexGroup,", "info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing to (a subset of", "1: raise Exception(f\"Array-valued case is only supported for a single column, matched {matchingColumns}", "info(self): info = super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__", "rules: {', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace:", "def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None,", "while transformer is not 
None\") self.skip = skip self.unsupported = unsupported self.transformer =", "self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns def", "rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the set of rules; rules", "a selected column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__()", "c in matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip: if rule.independentColumns and not", "df): return self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df, True) def info(self): info", "self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns: if c in matchedRulesByColumn: raise", "(which will be fitted & applied) :param columns: the set of column names", "then the arrays belonging to a single row must all have the same", "info(self): info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult", "drop: Union[str, Sequence[str]] = None): super().__init__() self.keep = [keep] if type(keep) == str", "super().__init__() self.keep = [keep] if type(keep) == str else keep self.drop = drop", "\"\"\" Filters a data frame by applying a boolean function to one of", "unknown category will raise an error. :param arrayValuedResult: whether to replace the input", "must have the same length as columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)],", "else: if self._defaultTransformerFactory is None: raise Exception(f\"No transformer to fit: {rule} defines no", "column. 
:param fit: whether the rule's transformer shall be fitted :param independentColumns: whether", ":param inplace: whether to apply data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled =", "in self.dataFrameTransformers: if isinstance(dft, cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self,", "return df def info(self): info = super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop", "not apply to any columns, transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is", "regular expression defining the column(s) the rule applies to. If it applies to", "super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "in matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip: if rule.independentColumns and not rule.arrayValued:", "sequence is specified in 'columns') or dictionary mapping column name to array of", "arrayValued self.fit = fit self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state,", "# collect specialised rule for application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try:", "def __init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column = column", "transformedValues = [transform(np.stack(row, axis=1)) for row in df.values] for iCol, col in enumerate(cols):", "column names to new names \"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self, df:", "used to utilise Numpy vectorisation for performance optimisation. 
\"\"\" def __init__(self, column: str,", "np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if not", "i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i] else: df[columnName] =", "is None: if rule.transformerFactory is not None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory", "used. NOTE: Use an instance only if you want, in particular, the instance", "__init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] = None): super().__init__() self.keep", "Any, Optional, Set import numpy as np import pandas as pd from sklearn.preprocessing", "for which the value is in the setToKeep \"\"\" def __init__(self, column: str,", "retains only the rows for which the value is in the setToKeep \"\"\"", "based on code prior to commit 7088cbbe # They lack the __isFitted attribute", "not fitted: \" f\"the df transformer {self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df)", "Union[Callable, np.ufunc]): \"\"\" :param column: the name of the column to be modified", "pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def apply(self, df:", "1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for", "columns are not handled by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def", "Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame):", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the", "If arrayValued is True, only a single matching column is supported, i.e. 
the", "if not self.inplace: df = df.copy() for cg in self.columnGenerators: series = cg.generateColumn(df)", "that are not saved otherwise can be persisted here # for backwards compatibility", "pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator", "if len(self.dataFrameTransformers) == 0: return for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df)", "of the columns :param unsupported: flag indicating whether normalisation of all columns is", "is not None: d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules return d def", "super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use", "only the rows for which it returns True \"\"\" def __init__(self, column: str,", "arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use (which will", "arrayValued: whether the column values are not scalars but arrays (of arbitrary lengths).", "else: d[\"userRules\"] = self._userRules return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn = {}", "df if not self.inplace: df = df.copy() for columnName in self._columnsToEncode: encodedArray =", "cg in self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series return df def info(self):", "a single row must all have the same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer", "columns each (or an array-valued column for the case where useArrayValues=True); If None,", "a regular expression defining the column(s) the rule applies to. 
If it applies", "\"\"\" Filters a data frame by applying a condition function to each row", "c in matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More than one rule applies", "across several models that use the same feature with associated rule/rule template (disabling", "columns will not be transformed. :param requireAllHandled: whether to raise an exception if", "a single transformer based on the values of all applicable columns) \"\"\" class", "transformation is to be learned for each of the columns for the case", "the transformed output of its predecessor. \"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__()", "being fitted using training data. \"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted =", "pd.DataFrame) -> pd.DataFrame: pass def apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df)", "\"\"\"Base class for transformers whose logic is entirely based on rules and does", "return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column", "of names of all contained feature generators \"\"\" return [transf.getName() for transf in", "applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a", "= skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns =", "will be fitted & applied) :param columns: the set of column names to", "not self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols) == 1: c = cols[0]", "return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a vectorized", "self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) 
-> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return", "super().__init__() self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory", "f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} # arguments", "in order to have the rule instantiated.\") return self.regex.fullmatch(column) is not None def", "if not all columns are matched by a rule :param inplace: whether to", "all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return: the list", "self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit", "a set of columns to which it applies (learning a single transformer based", "for the case where useArrayValues=True); If None, then no columns are actually to", "if the name has not been set. \"\"\" return self._name def setName(self, name):", "@abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def apply(self, df: pd.DataFrame) ->", "values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} matched no columns\")", "is not None or transformerFactory is not None): raise ValueError(\"skip==True while transformer/transformerFactory is", "unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param", "column uses a separately trained transformation). 
\"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]]", "shall be performed on the matching column(s) :param unsupported: flag indicating whether normalisation", "# They lack the __isFitted attribute and we assume that each such DFT", "matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns:", "Set = None): super().__init__() self.drop = drop self.keep = keep def _apply(self, df:", "to init that are not saved otherwise can be persisted here # for", "If None is given, either transformerFactory or the containing instance's default factory will", "self.arrayValuedResult: df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] =", "len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do not contain the same number of", "self.__dict__ = d def _toStringExcludePrivate(self) -> bool: return True def getName(self) -> str:", "have the rule instantiated.\") return self.regex.fullmatch(column) is not None def matchingColumns(self, columns: Sequence[str]):", "transformer. The default transformer will only be applied to columns matched by such", "= re.compile(regex) def matches(self, column: str): if self.regex is None: raise Exception(\"Attempted to", "only the rows for which the value is in the setToKeep \"\"\" def", "lengths). It is assumed that all entries in such arrays are to be", "not None: d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules return d def _fit(self,", "will be used. 
NOTE: Use an instance only if you want, in particular,", "applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer is None: if rule.transformerFactory is not", "self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName) for i in", "the given order :param defaultTransformerFactory: a factory for the creation of transformer instances", "transformer, which may be a default name if the name has not been", "Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace def _apply(self, df: pd.DataFrame)", "column and retaining only the rows for which it returns True \"\"\" def", "and the regex must be set later via setRegex or the rule will", "set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The following columns are not", "the columns for the case where the rule matches multiple columns. \"\"\" if", "Numpy vectorisation for performance optimisation. 
\"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence,", "1: c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]] else:", "state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries()", "self.inplace: df = df.copy() matchedRulesByColumn = {} for rule in self._rules: matchingColumns =", "self.getNames() info[\"length\"] = len(self) return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft", "columns: list of names or regex matching names of columns that are to", "DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a vectorized condition on the", "self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing to (a", "return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop", "c in matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:,", "transformation in-place :param ignoreUnknown: if True and an unknown category is encountered during", "on the selected column and retains only the rows for which the value", "columns self.inplace = inplace self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\",", "def __setstate__(self, state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def", "data frame transformers. 
During fit and apply each transformer in the chain receives", "specified by 'column' using 'columnTransform' \"\"\" def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]):", "type(columns) == str: self._columnNameRegex = columns self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns)", "transformation is the inverse transformation of this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer,", "self.keep = keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if", "column (which may vary in length) are to be transformed in the same", "in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i] else: df[columnName] = list(encodedArray)", "normalisation/scaling to a data frame by applying a set of transformation rules, where", "names or regex matching names of columns that are to be replaced by", "df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info =", "Callable, Any, Optional, Set import numpy as np import pandas as pd from", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a", "the name of the column to be modified :param columnTransform: a function that", "self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\"", "used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param arrayValued: whether the column values", "None: d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules return d def _fit(self, df:", "commit 7088cbbe # They lack the __isFitted attribute and we assume that each", ":param column: the name of the column to be modified :param columnTransform: a", "def isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\"", "and not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in", "a column specified by 'column' using 'columnTransform' \"\"\" def __init__(self, column: str, columnTransform:", "if self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self, regex:", "if isinstance(dft, cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str,", "cols = df.columns if not self.arrayValued: values = df[cols].values else: if len(cols) ==", "the rule instantiated.\") return self.regex.fullmatch(column) is not None def matchingColumns(self, columns: Sequence[str]): return", "def applyInverse(self, df): return self._apply_transformer(df, True) def info(self): info = super().info() info[\"columns\"] =", "the list of names of all contained feature generators \"\"\" return [transf.getName() for", "rule applies to. If it applies to multiple columns, these columns will be", "columns are transformed, they are transformed independently (i.e. 
each column uses a separately", "fitted & applied) :param columns: the set of column names to which the", "whether no transformation shall be performed on all of the columns :param unsupported:", "Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df:", "column: str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column = column def _apply(self,", "return d def setRegex(self, regex: str): self.regex = re.compile(regex) def matches(self, column: str):", "to scalar-valued columns but to one or more array-valued columns, where the values", "requireAllHandled=True, inplace=False): \"\"\" :param rules: the set of rules; rules are always fitted", "same number of values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df:", "df.copy() if self.keep is not None: df = df.loc[self.keep] if self.drop is not", "the same feature with associated rule/rule template (disabling `fit` where appropriate). Otherwise, use", "vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info =", "will not be applicable. :param skip: flag indicating whether no transformation shall be", "be all zeros. if False, an unknown category will raise an error. 
:param", "= df.copy() cols = self.columns if cols is None: cols = df.columns transform", "pd.DataFrame): if self._columnsToEncode is None: self._columnsToEncode = [c for c in df.columns if", "return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns in ascending order", "info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters columns by retaining or", "and retaining only the rows for which it returns True \"\"\" def __init__(self,", "df.loc[self.keep] if self.drop is not None: df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer):", "based on the values of all applicable columns) \"\"\" class RuleTemplate: def __init__(self,", "0] for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info =", "to each row and retaining only the rows for which it returns True", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() matchedRulesByColumn", "applies to :return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory,", "= self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying", "self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep]", "only the rows for which it returns True \"\"\" def __init__(self, condition: Callable[[Any],", "you want to make use of these columns, transform them into a supported", "+ [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex is", "if None, apply it to all columns :param inplace: whether to apply the", "df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] 
self._checkUnhandledColumns(df, matchedRulesByColumn) return df def", "pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() for cg in self.columnGenerators:", "(from sklearn.preprocessing, e.g. StandardScaler) that shall be used to create a transformer for", "InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df)", "selected column and retains only the rows for which the value is not", "all of the columns :param unsupported: flag indicating whether normalisation of all columns", "to be shared across several models that use the same feature with associated", "= rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise Exception(f\"No transformer to fit: {rule}", "pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() matchedRulesByColumn = {} for", "Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self, df:", "return all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return: the", "else: log.log(logging.DEBUG - 1, f\"{rule} matched no columns\") # collect specialised rule for", "series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info = super().info()", "not None\") self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory =", "of transformation rules, where each rule defines a set of columns to which", "dropping specified columns \"\"\" def __init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str,", "super().__init__() self.setToKeep = setToKeep self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "df: pd.DataFrame): if self._columnsToEncode is None: 
self._columnsToEncode = [c for c in df.columns", "never applied in order to have the rule instantiated.\") return self.regex.fullmatch(column) is not", "DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping old column", "skip: flag indicating whether no transformation shall be performed on the matching column(s)", "columnTransform: a function that takes a Numpy array and from which the returned", "else: if len(columns) != len(categories): raise ValueError(f\"Given categories must have the same length", "d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules return d def _fit(self, df: pd.DataFrame):", "whose (forward) transformation is the inverse transformation of this DFT \"\"\" return InverseDataFrameTransformer(self)", "transformation of this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for", "no transformation shall be performed on the matching column(s) :param unsupported: flag indicating", "column specified by 'column' using 'columnTransform'. This transformer can be used to utilise", "None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName", "df: pd.DataFrame): cols = self.columns if cols is None: cols = df.columns if", "self._name def setName(self, name): self._name = name @abstractmethod def _fit(self, df: pd.DataFrame): pass", "DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def", "frame. If multiple columns are transformed, they are transformed independently (i.e. 
each column", "import numpy as np import pandas as pd from sklearn.preprocessing import OneHotEncoder from", "= {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName in self._columnsToEncode:", "instance only if you want, in particular, the instance to be shared across", "any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "[transform(np.array([x]).T)[:, 0] for x in df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for row", "_apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if not self.inplace: df = df.copy()", "def __init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] = None): super().__init__()", "__init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def", "rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace", ":param columnTransform: a function that takes a Numpy array and from which the", "import ColumnGenerator from ..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import", "def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return info", "_fit(self, df: pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass def isFitted(self): return True", "in self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for c in", "InvertibleDataFrameTransformer(DataFrameTransformer, ABC): 
@abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) ->", "columnTransform: a function operating on single cells or a Numpy ufunc that applies", "flag indicating whether normalisation of all columns is unsupported (shall trigger an exception", "be modified :param columnTransform: a function operating on single cells or a Numpy", "in df.columns if re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self}", "See `SkLearnTransformerFactoryFactory` for convenient construction options. :param arrayValued: whether the column values are", "= d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d def _toStringExcludePrivate(self) ->", "info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing", "pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose", "to multiple columns, these columns will be normalised in the same way (using", "transformers, i.e. objects which can transform one data frame into another (possibly applying", "super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex", "belonging to a single row must all have the same length. 
\"\"\" super().__init__()", "self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {}", "..util.pickle import setstate from ..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC,", "columns is None: self._columnsToEncode = [] self._columnNameRegex = \"$\" elif type(columns) == str:", "<reponame>schroedk/sensAI<gh_stars>0 import copy import logging import re from abc import ABC, abstractmethod from", "for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info = super().info()", "a placeholder rule and the regex must be set later via setRegex or", "self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer):", "= self.requireAllHandled info[\"inplace\"] = self.inplace return info def findRule(self, colName: str) -> \"DFTNormalisation.Rule\":", "info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with", "indicating whether normalisation of the matching column(s) is unsupported (shall trigger an exception", "transformer in self.dataFrameTransformers: df = transformer.apply(df) return df def _fit(self, df: pd.DataFrame): if", "defines no transformer and instance has no transformer factory\") rule.transformer = self._defaultTransformerFactory() if", "either transformerFactory or the containing instance's default factory will be used. 
NOTE: Use", "frame on the selected column and retains only the rows for which the", "def __init__(self, decimals=0): super().__init__() self.decimals = decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "of all columns is unsupported (shall trigger an exception if attempted) :param transformer:", "the __isFitted attribute and we assume that each such DFT was fitted def", "type(keep) == str else keep self.drop = drop def _apply(self, df: pd.DataFrame) ->", "Applies normalisation/scaling to a data frame by applying a set of transformation rules,", "training data. \"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker]", "transformer based on the values of all applicable columns) \"\"\" class RuleTemplate: def", "not None): raise ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex = re.compile(regex) if", "\"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap)", "attempted) :param transformer: a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to", "specialised rule for application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex =", "for iCol, col in enumerate(cols): df[col] = [row[:, iCol] for row in transformedValues]", "an exception if attempted) :param transformer: a transformer instance (from sklearn.preprocessing, e.g. 
StandardScaler)", "self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown", "info def findRule(self, colName: str) -> \"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName):", "df def info(self): return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not", "matchedRulesByColumn: raise Exception(f\"More than one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c]", "one-hot encoded columns for this feature will be all zeros. if False, an", "return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info = super().info() info[\"decimals\"] = self.decimals", "transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert", "performed on all of the columns :param unsupported: flag indicating whether normalisation of", "{} for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue", "None, drop: Set = None): super().__init__() self.drop = drop self.keep = keep def", "unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The following columns", "not self.inplace: df = df.copy() for cg in self.columnGenerators: series = cg.generateColumn(df) df[series.name]", "self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self): return", "self.columnGenerators = columnGenerators self.inplace = inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if", "a placeholder rule. 
Perhaps the feature generator from which the rule originated was", "= \"ignore\" if ignoreUnknown else \"error\" if categories is not None: if type(categories)", "info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns", "class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with counts of the values on", "is not None else None, \"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df)", "= columnGenerators self.inplace = inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not", "the case where the rule matches multiple columns. \"\"\" if skip and (transformer", "_apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0: return df if not self.inplace: df", "is not in the setToDrop \"\"\" def __init__(self, column: str, setToDrop: Set): super().__init__()", "and from which the returned value will be assigned to the column as", "def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for transformer in self.dataFrameTransformers[:-1]:", "i.e. objects which can transform one data frame into another (possibly applying the", "where useArrayValues=True); If None, then no columns are actually to be one-hot-encoded :param", "feature will be all zeros. 
if False, an unknown category will raise an", "__init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the set of rules;", "\"\"\" Filters a data frame by applying a vectorized condition on the selected", "applying a condition function to each row and retaining only the rows for", "a vectorized condition on the selected column and retaining only the rows for", "return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform'.", "applying a set of transformation rules, where each rule defines a set of", "transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip:", "= (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued:", "in the setToDrop \"\"\" def __init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop =", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies", "such DFT was fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] =", "to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) > 0: if", "info(self): info = super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return info class", "inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\" if categories", "Callable[[Any], bool]): super().__init__() self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "Sequence[str]): return [col for col in columns if self.matches(col)] def 
__init__(self, rules: Sequence[Rule],", "str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column = column def _apply(self, df:", "logging import re from abc import ABC, abstractmethod from typing import List, Sequence,", "matching column is supported, i.e. the regex must match at most one column.", "transformer can be used to utilise Numpy vectorisation for performance optimisation. \"\"\" def", "independently (i.e. each column uses a separately trained transformation). \"\"\" def __init__(self, sklearnTransformer:", "resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return", "value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders =", "self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a", "= [transform(np.stack(row, axis=1)) for row in df.values] for iCol, col in enumerate(cols): df[col]", "of rules; rules are always fitted and applied in the given order :param", "column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) > 0: if rule.unsupported:", "matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for", "sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if not rule.arrayValued: df[c]", "= column self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)]", "will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param independentColumns: whether a", "_toStringExcludePrivate(self) -> bool: return True def getName(self) -> str: \"\"\" :return: the name", "if rule.transformerFactory is not None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is None:", "-> List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d =", "try: specialisedRule.regex = re.compile(r) except Exception as e: raise Exception(f\"Could not compile regex", "columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\"", "entire Series \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self, df:", "predecessor. \"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self):", "transformer instance, which will only be applied if `transformer` is not given; if", "is assumed that all entries in such arrays are to be normalised in", "raise ValueError(\"skip==True while transformer is not None\") self.skip = skip self.unsupported = unsupported", "not self.inplace: df = df.copy() matchedRulesByColumn = {} for rule in self._rules: matchingColumns", "for c in matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] =", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__()", "for the case where the rule matches multiple columns. 
\"\"\" if skip and", "info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a condition function", "inverse transformation of this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class", "single cells or a Numpy ufunc that applies to an entire Series \"\"\"", "def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex is not None:", "Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation def _apply(self, df: pd.DataFrame) ->", "columns for this feature will be all zeros. if False, an unknown category", "-> pd.DataFrame: df = df.copy() if self.keep is not None: df = df.loc[self.keep]", "the same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace", "for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for", "to :return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns)", "models that use the same feature with associated rule/rule template (disabling `fit` where", "the transformation to the original data frame - in-place transformation). 
A data frame", "rule defines a set of columns to which it applies (learning a single", "class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None,", "def setName(self, name): self._name = name @abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod", "\"\"\" :param rules: the set of rules; rules are always fitted and applied", "unsupported: flag indicating whether normalisation of all columns is unsupported (shall trigger an", "in such arrays are to be normalised in the same way. If arrayValued", "0] for x in df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for row in", "is None: raise Exception(f\"No transformer to fit: {rule} defines no transformer and instance", "a transformer from sklearn.preprocessing to (a subset of the columns of) a data", "True \"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition = condition def _apply(self,", "self.drop = drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if", "all columns matching the regex :param regex: a regular expression defining the column", "the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self):", "the rows for which the value is in the setToKeep \"\"\" def __init__(self,", "return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a condition", "number of values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame,", "vary in length) are to be transformed in the same way. 
If multiple", "df.values] for iCol, col in enumerate(cols): df[col] = [row[:, iCol] for row in", "dft in self.dataFrameTransformers: if isinstance(dft, cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def", "in-place :param ignoreUnknown: if True and an unknown category is encountered during transform,", "ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex = re.compile(regex) if regex is not", "uses a separately trained transformation). \"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] =", "column(s) for the case where a transformation is necessary (skip=False, unsupported=False). If None", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() for cg", "condition: Callable[[Any], bool]): super().__init__() self.column = column self.condition = condition def _apply(self, df:", "otherwise can be persisted here # for backwards compatibility with persisted DFTs based", "info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that", "the transformation in-place :param arrayValued: whether to apply transformation not to scalar-valued columns", "to the column as a whole \"\"\" super().__init__() self.column = column self.columnTransform =", "drop self.keep = keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy()", "defaultTransformerFactory: a factory for the creation of transformer instances (from sklearn.preprocessing, e.g. StandardScaler)", "class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping old", "transformed in the same way. If multiple columns are transformed, then the arrays", "error. 
:param arrayValuedResult: whether to replace the input columns by columns of the", "if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x", "and (transformer is not None or transformerFactory is not None): raise ValueError(\"skip==True while", "import DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string import orRegexGroup, ToStringMixin log =", "the generation of the transformer instance, which will only be applied if `transformer`", "_fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for transformer in self.dataFrameTransformers[:-1]: df", "info[\"length\"] = len(self) return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in", "not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply to any columns,", "= rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn)", "str): if self.regex is None: raise Exception(\"Attempted to apply a placeholder rule. Perhaps", "rule's transformer shall be fitted :param independentColumns: whether a separate transformation is to", "= self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString()", "self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders = None if columns", "whether no transformation shall be performed on the matching column(s) :param unsupported: flag", "to be modified :param columnTransform: a function operating on single cells or a", "keep self.drop = drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy()", "default name if the name has not been set. 
\"\"\" return self._name def", "columns to which it applies (learning a single transformer based on the values", "case is only supported for a single column, matched {matchingColumns} for {rule}\") values", "df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i] else: df[columnName] = list(encodedArray) return df", "the rows for which it returns True \"\"\" def __init__(self, condition: Callable[[Any], bool]):", "column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the name of the column", "\"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders = None", "def info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info", "instantiated.\") return self.regex.fullmatch(column) is not None def matchingColumns(self, columns: Sequence[str]): return [col for", "[len(a) for a in flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do", "function to one of the columns and retaining only the rows for which", "self.transformerFactory = transformerFactory self.independentColumns = independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert the", "Exception(f\"The following columns are not handled by any rules: {unhandledColumns}; rules: {', '.join(map(str,", "pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"] = self.column", "rows for which it returns True \"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series],", "pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"] = self.column", "fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return { \"name\": self.getName(),", "it to 
all columns :param inplace: whether to apply the transformation in-place :param", "self.columnNameForResultingCounts: series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount", "None self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory", "transformer in the chain receives the transformed output of its predecessor. \"\"\" def", "as columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories", "apply to any columns, transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None:", "from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string import orRegexGroup, ToStringMixin", "super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters", "whether normalisation of all columns is unsupported (shall trigger an exception if attempted)", "def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer:", "= d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] =", "perform the transformation in-place :param ignoreUnknown: if True and an unknown category is", "list of names of all contained feature generators \"\"\" return [transf.getName() for transf", "df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform'", "specified by 
'column' using 'columnTransform'. This transformer can be used to utilise Numpy", "columns by columns of the same name containing arrays as values instead of", "def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False,", "d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__", "-> bool: return True def getName(self) -> str: \"\"\" :return: the name of", "def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers)", "them into a supported column before applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer", "df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x) if", "= \"$\" elif type(columns) == str: self._columnNameRegex = columns self._columnsToEncode = None else:", "using 'columnTransform' \"\"\" def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column:", "transformation). A data frame transformer may require being fitted using training data. 
\"\"\"", "that takes a Numpy array and from which the returned value will be", "= f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} #", "arguments passed to init that are not saved otherwise can be persisted here", "keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] = None): super().__init__() self.keep =", "fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued", "zeros. if False, an unknown category will raise an error. :param arrayValuedResult: whether", "1, f\"{rule} matched no columns\") # collect specialised rule for application specialisedRule =", "output of its predecessor. \"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers =", "in self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False):", "cells or a Numpy ufunc that applies to an entire Series \"\"\" super().__init__()", "default factory will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param arrayValued:", "True def getName(self) -> str: \"\"\" :return: the name of this dft transformer,", "= skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued =", "to commit 7088cbbe # They lack the __isFitted attribute and we assume that", "inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical variables :param columns: list of", "self._apply_transformer(df, True) def info(self): info = super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace", "contained feature generators \"\"\" return [transf.getName() for transf in self.dataFrameTransformers] def info(self): info", "condition function to each row and retaining only the rows for which it", "column) unless independentColumns=True. If None, the rule is a placeholder rule and the", "pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT", "\"\"\" :param skip: flag indicating whether no transformation shall be performed on all", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts:", "to be modified :param columnTransform: a function that takes a Numpy array and", "info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation: Callable):", "than one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if", "self.drop = drop self.keep = keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df", "pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return 
pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self):", "are to be transformed in the same way. If multiple columns are transformed,", "be fitted to data\"\"\" def _fit(self, df: pd.DataFrame): pass def fit(self, df: pd.DataFrame):", "% (columnName, i)] = encodedArray[:, i] else: df[columnName] = list(encodedArray) return df def", "been set. \"\"\" return self._name def setName(self, name): self._name = name @abstractmethod def", "= self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer):", "super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "specified columns (for case where sequence is specified in 'columns') or dictionary mapping", "-> str: \"\"\" :return: the name of this dft transformer, which may be", "self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self):", "matchedRulesByColumn = {} self._rules = [] for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns)", "where a transformation is necessary (skip=False, unsupported=False). 
If None is given, either transformerFactory", "self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state)", "= {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)} def __setstate__(self,", "applies to an entire Series \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform", "-> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft, cls): return dft return None", "setstate from ..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\"", "def info(self): info = super().info() info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\"", "List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame)", "for transf in self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"]", "rules, where each rule defines a set of columns to which it applies", "frame by applying a set of transformation rules, where each rule defines a", "def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount", "rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if", "frame transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules =", "if skip and (transformer is not None or transformerFactory is not 
None): raise", "None: if rule.transformerFactory is not None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is", "the column(s) the rule applies to. If it applies to multiple columns, these", "__setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\",", "whether to raise an exception if not all columns are matched by a", "return self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer,", "= rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values)", "DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters columns by retaining or dropping specified", "= aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def", "are transformed, they are transformed independently (i.e. each column uses a separately trained", "matched by a rule :param inplace: whether to apply data frame transformations in-place", "self.keep = [keep] if type(keep) == str else keep self.drop = drop def", "mapping old column names to new names \"\"\" super().__init__() self.columnsMap = columnsMap def", "DataFrame transformer that filters columns by retaining or dropping specified columns \"\"\" def", "cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft, cls): return dft return", "multiple columns, these columns will be normalised in the same way (using the", "set. 
\"\"\" return self._name def setName(self, name): self._name = name @abstractmethod def _fit(self,", "df: pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass def isFitted(self): return True class", "rows for which the value is not in the setToDrop \"\"\" def __init__(self,", "column(s) the rule applies to. If it applies to multiple columns, these columns", "will raise an error. :param arrayValuedResult: whether to replace the input columns by", "df = df.copy() matchedRulesByColumn = {} for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns)", "except Exception as e: raise Exception(f\"Could not compile regex '{r}': {e}\") self._rules.append(specialisedRule) def", "\"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts", "single row must all have the same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer =", "name containing arrays as values instead of creating a separate column per original", "returned value will be assigned to the column as a whole \"\"\" super().__init__()", "self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected", "the rule matches multiple columns. \"\"\" if skip and (transformer is not None", "= self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with counts", "for performance optimisation. 
\"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]):", "be replaced by a list one-hot encoded columns each (or an array-valued column", "= df.copy() for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df", "d = super()._toStringAdditionalEntries() if self._rules is not None: d[\"rules\"] = self._rules else: d[\"userRules\"]", "for transformers whose logic is entirely based on rules and does not need", "rows for which the function returns True \"\"\" def __init__(self, column: str, condition:", "None: raise Exception(f\"No transformer to fit: {rule} defines no transformer and instance has", "to apply to the matching column(s) for the case where a transformation is", "state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"]", "the same way (using the same normalisation process for each column) unless independentColumns=True.", "= transformer.apply(df) return df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return", "self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit = fit self.independentColumns", "super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame): if self._columnsToEncode is", "df = df.copy() if self.keep is not None: df = df[self.keep] if self.drop", "fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"]", "df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by", 
"df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i] else:", ":param columnsMap: dictionary mapping old column names to new names \"\"\" super().__init__() self.columnsMap", "df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None, drop: Set = None):", "for convenient construction options. :param arrayValued: whether the column values are not scalars", "= np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if", "returns True \"\"\" def __init__(self, column: str, condition: Callable[[Any], bool]): super().__init__() self.column =", "each column uses a separately trained transformation). \"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns:", "the transformation shall apply; if None, apply it to all columns :param inplace:", "name of the column to be modified :param columnTransform: a function operating on", "in the same way. If multiple columns are transformed, then the arrays belonging", "[np.concatenate(df[col].values.flatten()) for col in cols] lengths = [len(a) for a in flatColArrays] if", "flag indicating whether no transformation shall be performed on the matching column(s) :param", "= self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns in", "Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a regular expression", "Exception(f\"No transformer to fit: {rule} defines no transformer and instance has no transformer", "of these columns, transform them into a supported column before applying {self.__class__.__name__}.\") if", "encoded columns for this feature will be all zeros. 
if False, an unknown", "InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose logic is entirely based", "[row[:, iCol] for row in transformedValues] return df def _apply(self, df): return self._apply_transformer(df,", "rule is a placeholder rule and the regex must be set later via", "independentColumns: whether a separate transformation is to be learned for each of the", "class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self)", ":return: a transformer whose (forward) transformation is the inverse transformation of this DFT", "if self.keep is not None: df = df.loc[self.keep] if self.drop is not None:", "for this feature will be all zeros. if False, an unknown category will", "self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns)", "List, Sequence, Union, Dict, Callable, Any, Optional, Set import numpy as np import", "Union, Dict, Callable, Any, Optional, Set import numpy as np import pandas as", "column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column: the name of", "transformer/transformerFactory is not None\") self.regex = re.compile(regex) if regex is not None else", "= df.copy() matchedRulesByColumn = {} for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns) if", "x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] =", "rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else:", "Perhaps the feature generator from which the rule originated was never applied in", "return 
df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"] = self.column return info class", "a DataFrameTransformer which is not fitted: \" f\"the df transformer {self.getName()} requires fitting\")", "True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self,", "dictionary mapping old column names to new names \"\"\" super().__init__() self.columnsMap = columnsMap", "= columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\"", "df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data", "= super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter): def", "sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer", "within a column (which may vary in length) are to be transformed in", "OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)} def __setstate__(self, state): if", ":return: the name of this dft transformer, which may be a default name", "self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) ==", "template (disabling `fit` where appropriate). Otherwise, use a factory. 
:param transformerFactory: a factory", "def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0: return df if not self.inplace:", "trigger an exception if attempted) :param transformer: a transformer instance (from sklearn.preprocessing, e.g.", "is to be learned for each of the columns for the case where", "the column name. If None, the possible values will be inferred from the", "def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT =", "numpy as np import pandas as pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer", "not all columns are matched by a rule :param inplace: whether to apply", "categories is None self.oneHotEncoders = None if columns is None: self._columnsToEncode = []", "d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d def _toStringExcludePrivate(self) -> bool: return True", "!= 1: raise ValueError(f\"Columns {cols} do not contain the same number of values:", ":return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def", "an error. :param arrayValuedResult: whether to replace the input columns by columns of", "\"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose (forward) transformation is the inverse transformation of", "zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False", "regex must match at most one column. 
:param fit: whether the rule's transformer", "getName(self) -> str: \"\"\" :return: the name of this dft transformer, which may", "specialisedRule.regex = re.compile(r) except Exception as e: raise Exception(f\"Could not compile regex '{r}':", "= super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame): if self._columnsToEncode", "\"\"\" def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the name", "str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df:", "applied) :param columns: the set of column names to which the transformation shall", "values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols] lengths = [len(a)", "frame by applying a vectorized condition on the selected column and retaining only", "len(self) return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if", "= setToDrop self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)]", "data frame on the selected column and retains only the rows for which", "transformer will only be applied to columns matched by such rules, unmatched columns", "self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame: if not self.inplace: df", "encodedArray[:, i] else: df[columnName] = list(encodedArray) return df def info(self): info = super().info()", "False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} # arguments passed to init", "containing arrays as values instead of creating a separate column per original value", "-> pd.DataFrame: pass def apply(self, df: pd.DataFrame) -> pd.DataFrame: 
self._columnChangeTracker = DataFrameColumnChangeTracker(df) if", "self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame): if self._columnsToEncode is None: self._columnsToEncode =", "be normalised in the same way (using the same normalisation process for each", "bool]): super().__init__() self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition,", "= inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\" if", "a data frame by applying a vectorized condition on the selected column and", "ToStringMixin): \"\"\" Base class for data frame transformers, i.e. objects which can transform", "def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace def", "are given, the containing instance's default factory will be used. See `SkLearnTransformerFactoryFactory` for", "feature with associated rule/rule template (disabling `fit` where appropriate). 
Otherwise, use a factory.", "..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string import orRegexGroup, ToStringMixin log", "returns True \"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition = condition def", "columns :param inplace: whether to apply the transformation in-place :param arrayValued: whether to", "self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName in", "does not apply to any columns, transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders", "0: raise Exception(f\"The following columns are not handled by any rules: {unhandledColumns}; rules:", "single transformer based on the values of all applicable columns) \"\"\" class RuleTemplate:", "transformer whose (forward) transformation is the inverse transformation of this DFT \"\"\" return", "are not scalars but arrays (of arbitrary lengths). It is assumed that all", "data frame by applying a condition function to each row and retaining only", "the possible values of each of the specified columns (for case where sequence", "same way (using the same normalisation process for each column) unless independentColumns=True. If", "pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame) ->", "df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() for cg in", "convenient construction options. 
:param independentColumns: whether a separate transformation is to be learned", "neither `transformer` nor `transformerInstance` are given, the containing instance's default factory will be", "in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0: return df", "None else None, \"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted =", "toRule(self, regex: Optional[str]): \"\"\" Convert the template to a rule for all columns", "be persisted here # for backwards compatibility with persisted DFTs based on code", "def _fit(self, df: pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass def isFitted(self): return", "in the same way (using the same normalisation process for each column) unless", "value is in the setToKeep \"\"\" def __init__(self, column: str, setToKeep: Set): super().__init__()", "for col, categories in categories.items()} else: if len(columns) != len(categories): raise ValueError(f\"Given categories", "return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing to (a subset", "def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] =", "process for each column) unless independentColumns=True. If None, the rule is a placeholder", "\"\"\" Applies a transformer from sklearn.preprocessing to (a subset of the columns of)", "'columnTransform'. This transformer can be used to utilise Numpy vectorisation for performance optimisation.", "skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True,", "use a factory. 
:param transformerFactory: a factory for the generation of the transformer", ":param ignoreUnknown: if True and an unknown category is encountered during transform, the", "None self._paramInfo = {} # arguments passed to init that are not saved", "if self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in", "self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame:", "set of rules; rules are always fitted and applied in the given order", "state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame): cols =", "pd.Series, np.ndarray]]): \"\"\" :param column: the name of the column to be modified", "info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a", "__len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers:", "categorical variables :param columns: list of names or regex matching names of columns", "self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation:", "original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders", "= super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return info def findRule(self, colName:", "the function returns True \"\"\" def __init__(self, column: str, condition: Callable[[Any], bool]): super().__init__()", 
"retains only the rows for which the value is not in the setToDrop", "column to be modified :param columnTransform: a function that takes a Numpy array", "= None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex:", "which the rule originated was never applied in order to have the rule", "rule for application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r)", "DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__)", "name has not been set. \"\"\" return self._name def setName(self, name): self._name =", "self.columns if cols is None: cols = df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x))", "1: raise ValueError(f\"Columns {cols} do not contain the same number of values: {lengths}\")", "(possibly applying the transformation to the original data frame - in-place transformation). A", "keep: Set = None, drop: Set = None): super().__init__() self.drop = drop self.keep", "in the same way. 
If arrayValued is True, only a single matching column", "has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False,", "only be applied to columns matched by such rules, unmatched columns will not", "if columns is None: self._columnsToEncode = [] self._columnNameRegex = \"$\" elif type(columns) ==", "= [] self._columnNameRegex = \"$\" elif type(columns) == str: self._columnNameRegex = columns self._columnsToEncode", "df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals", "frame by applying a condition function to each row and retaining only the", "isinstance(dft, cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]):", "pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToKeep\"] =", "whether to apply the transformation in-place :param arrayValued: whether to apply transformation not", "handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\" not", "def info(self): info = super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop return info", "\"\"\" :return: a transformer whose (forward) transformation is the inverse transformation of this", ":param inplace: whether to perform the transformation in-place :param ignoreUnknown: if True and", "self.column = column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column]", "super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self,", "= self.inplace 
info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data", "normalised in the same way (using the same normalisation process for each column)", "> 1: raise Exception(f\"Array-valued case is only supported for a single column, matched", "lack the __isFitted attribute and we assume that each such DFT was fitted", "for i in range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i] else: df[columnName]", "info = super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer):", "rule :param inplace: whether to apply data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled", "an unknown category is encountered during transform, the resulting one-hot encoded columns for", "super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class", "it applies (learning a single transformer based on the values of all applicable", "None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self._rules is not", "of this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers", "self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters columns", "be performed on the matching column(s) :param unsupported: flag indicating whether normalisation of", "to a single row must all have the same length. 
\"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\")", "by applying a boolean function to one of the columns and retaining only", "Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One", "df = transformer.apply(df) return df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0:", "def info(self): return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None", ":param inplace: whether to apply the transformation in-place :param arrayValued: whether to apply", "for c in matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More than one rule", "copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception as e: raise", "pd.DataFrame): pass def isFitted(self): return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__()", "or dictionary mapping column name to array of possible categories for the column", "dictionary mapping column name to array of possible categories for the column name.", "is not given; if neither `transformer` nor `transformerInstance` are given, the containing instance's", "is a placeholder rule and the regex must be set later via setRegex", "= independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert the template to a rule", "self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0: return df if not", "whether a separate transformation is to be learned for each of the columns", "dft transformer, which may be a default name if the name has not", "from which the rule originated was never applied in order to have 
the", "independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self)", "state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) ->", "= None): super().__init__() self.keep = [keep] if type(keep) == str else keep self.drop", "values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} matched no columns\") # collect", "self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values)", "columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts()", "for row in transformedValues] return df def _apply(self, df): return self._apply_transformer(df, False) def", "-> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying", "{col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()} else: if len(columns) !=", "info(self): return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else", "if False, an unknown category will raise an error. :param arrayValuedResult: whether to", "must match at most one column. 
:param fit: whether the rule's transformer shall", "self.fit = fit self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False,", "range(encodedArray.shape[1]): df[\"%s_%d\" % (columnName, i)] = encodedArray[:, i] else: df[columnName] = list(encodedArray) return", "\"\"\" Convert the template to a rule for all columns matching the regex", "def __init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column = column", "all have the same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns =", "super().__init__() self.setToDrop = setToDrop self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "col in columns if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\"", "df.columns if not self.arrayValued: values = df[cols].values else: if len(cols) == 1: values", "self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns in ascending", "= decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index)", "if not self.inplace: df = df.copy() matchedRulesByColumn = {} for rule in self._rules:", "into another (possibly applying the transformation to the original data frame - in-place", "effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for", "multiple columns. 
\"\"\" if skip and transformer is not None: raise ValueError(\"skip==True while", "info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return info def findRule(self, colName: str) ->", "self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers])", "i.e. the regex must match at most one column. :param fit: whether the", "frame into another (possibly applying the transformation to the original data frame -", "by applying a set of transformation rules, where each rule defines a set", "in particular, the instance to be shared across several models that use the", "df[col] = [row[:, iCol] for row in transformedValues] return df def _apply(self, df):", "skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued", "names of columns that are to be replaced by a list one-hot encoded", "import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util import flattenArguments from ..util.pandas import", "if attempted) :param transformer: a transformer instance (from sklearn.preprocessing, e.g. 
StandardScaler) to apply", "info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation:", "Exception(f\"More than one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule", "*dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self,", "\"\"\" if skip and (transformer is not None or transformerFactory is not None):", "None or transformerFactory is not None): raise ValueError(\"skip==True while transformer/transformerFactory is not None\")", "transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)]", "column(s) :param unsupported: flag indicating whether normalisation of the matching column(s) is unsupported", "d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str): self.regex = re.compile(regex) def", "\"\"\" A DataFrame transformer that filters columns by retaining or dropping specified columns", "= {} # arguments passed to init that are not saved otherwise can", "to replace the input columns by columns of the same name containing arrays", "not None: raise ValueError(\"skip==True while transformer is not None\") self.skip = skip self.unsupported", "setToKeep \"\"\" def __init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column", "f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {})", "the matching column(s) for the case where a transformation is 
necessary (skip=False, unsupported=False).", "'column' using 'columnTransform' \"\"\" def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param", "self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer):", "cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\"", "sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self,", "[\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self.regex is not", "during transform, the resulting one-hot encoded columns for this feature will be all", "= categories is None self.oneHotEncoders = None if columns is None: self._columnsToEncode =", "case where sequence is specified in 'columns') or dictionary mapping column name to", "the name of the column to be modified :param columnTransform: a function operating", "ignoreUnknown else \"error\" if categories is not None: if type(categories) == dict: self.oneHotEncoders", "if self.drop is not None: df = df.drop(columns=self.drop) return df def info(self): info", "transformer that filters columns by retaining or dropping specified columns \"\"\" def __init__(self,", "applied if `transformer` is not given; if neither `transformer` nor `transformerInstance` are given,", "independentColumns=False): \"\"\" :param regex: a regular expression defining the column(s) the rule applies", "all applicable columns) \"\"\" class RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol =", "whole \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame)", 
"typing import List, Sequence, Union, Dict, Callable, Any, Optional, Set import numpy as", "new column with counts of the values on a selected column \"\"\" def", "getNames(self) -> List[str]: \"\"\" :return: the list of names of all contained feature", "the regex :param regex: a regular expression defining the column the rule applies", "df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for c in matchingColumns: if not rule.arrayValued: df[c] =", "boolean function to one of the columns and retaining only the rows for", "of the same name containing arrays as values instead of creating a separate", "= inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df =", "columns (for case where sequence is specified in 'columns') or dictionary mapping column", "pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals =", "info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a vectorized condition", "via setRegex or the rule will not be applicable. :param skip: flag indicating", "from ..util.string import orRegexGroup, ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base", "normalised in the same way. 
If arrayValued is True, only a single matching", "else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols] lengths = [len(a) for a", "return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters columns by retaining", "df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info()", "self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns", "info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer):", "transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to the matching column(s) for", "if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The", "must be set later via setRegex or the rule will not be applicable.", "most one column. 
:param fit: whether the rule's transformer shall be fitted :param", "d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\", True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None)", "pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"] = self.column return info", "None def matchingColumns(self, columns: Sequence[str]): return [col for col in columns if self.matches(col)]", "column as a whole \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def", "independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) ->", "prior to commit 7088cbbe # They lack the __isFitted attribute and we assume", "columns for the case where the rule matches multiple columns. \"\"\" if skip", "whether the rule's transformer shall be fitted :param independentColumns: whether a separate transformation", "__init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the name of the", "__init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the", "hot encode categorical variables :param columns: list of names or regex matching names", "actually to be one-hot-encoded :param categories: numpy arrays containing the possible values of", "values instead of creating a separate column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"]", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info()", "apply to the matching column(s) for the case where a transformation is necessary", "pd.DataFrame: return 
df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals = decimals def", "transformer applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued case", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def", "= np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns: values = applicableDF.values else: values", "rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} matched no columns\") # collect specialised rule", "applyInverse(self, df): return self._apply_transformer(df, True) def info(self): info = super().info() info[\"columns\"] = self.columns", "series.index, self.columnNameForResultingCounts: series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] =", "= None if columns is None: self._columnsToEncode = [] self._columnNameRegex = \"$\" elif", "def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft, cls): return", "return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class", "column(s) is unsupported (shall trigger an exception if attempted) :param transformer: a transformer", "is None: cols = df.columns if not self.arrayValued: values = df[cols].values else: if", "a list one-hot encoded columns each (or an array-valued column for the case", "columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = 
columnForEntryCount", "transformer: a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to the matching", "columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index,", "containing the possible values of each of the specified columns (for case where", "columns :param inplace: whether to perform the transformation in-place :param ignoreUnknown: if True", "return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a boolean", "_fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules = [] for rule in self._userRules:", "invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports", "matching column(s) :param unsupported: flag indicating whether normalisation of the matching column(s) is", "self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self):", "attribute and we assume that each such DFT was fitted def __setstate__(self, d):", "that filters columns by retaining or dropping specified columns \"\"\" def __init__(self, keep:", "columns. \"\"\" if skip and transformer is not None: raise ValueError(\"skip==True while transformer", "has not been set. \"\"\" return self._name def setName(self, name): self._name = name", "frame transformers. 
During fit and apply each transformer in the chain receives the", "columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders = None if columns is None:", "__init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False):", "elif rule.independentColumns: values = applicableDF.values else: values = applicableDF.values.flatten() values = values.reshape((len(values), 1))", "Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot", "-> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self._rules is not None: d[\"rules\"] =", "name of the column to be modified :param columnTransform: a function that takes", "Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the set of rules; rules are", "sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()} else: if len(columns) != len(categories): raise", "df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column and", "self._columnsToEncode is None: self._columnsToEncode = [c for c in df.columns if re.fullmatch(self._columnNameRegex, c)", "= unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit =", "will be all zeros. if False, an unknown category will raise an error.", "transformer may require being fitted using training data. \"\"\" def __init__(self): self._name =", "of the columns for the case where the rule matches multiple columns. \"\"\"", "containing instance's default factory will be used. 
See `SkLearnTransformerFactoryFactory` for convenient construction options.", "self._isFitted = True def isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame:", "rule.skip: if rule.independentColumns and not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else:", "a supported column before applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer is None:", "if you want, in particular, the instance to be shared across several models", "to columns matched by such rules, unmatched columns will not be transformed. :param", "will not be transformed. :param requireAllHandled: whether to raise an exception if not", "cols is None: cols = df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse", "or the rule will not be applicable. :param skip: flag indicating whether no", "all entries in such arrays are to be normalised in the same way.", "the name of this dft transformer, which may be a default name if", "rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators =", "array-valued columns, where the values of all arrays within a column (which may", "assume that each such DFT was fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\",", "which it returns True \"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__()", "these columns, transform them into a supported column before applying {self.__class__.__name__}.\") if not", "{ \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None, \"isFitted\":", "df[self.keep] if self.drop is not None: df = df.drop(columns=self.drop) 
return df def info(self):", "containing instance's default factory will be used. NOTE: Use an instance only if", "are matched by a rule :param inplace: whether to apply data frame transformations", "instance (from sklearn.preprocessing) to use (which will be fitted & applied) :param columns:", "do not contain the same number of values: {lengths}\") values = np.stack(flatColArrays, axis=1)", "pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified", "column before applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer is None: if rule.transformerFactory", "be fitted & applied) :param columns: the set of column names to which", "self._rules = None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self._rules", "def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if self._rules is not None:", "by a rule :param inplace: whether to apply data frame transformations in-place \"\"\"", "in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns: if c in matchedRulesByColumn:", "self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0: return df if", "rule.skip: if rule.transformer is None: if rule.transformerFactory is not None: rule.transformer = rule.transformerFactory()", "column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values)", "None: self._columnsToEncode = [c for c in df.columns if re.fullmatch(self._columnNameRegex, c) is not", "fitted: \" f\"the df transformer {self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return", "= fit 
self.independentColumns = independentColumns def __setstate__(self, state): setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True,", "[rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info", "= column self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])]", "by columns of the same name containing arrays as values instead of creating", "Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[],", "= self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray],", "pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None,", "arrayValued: whether to apply transformation not to scalar-valued columns but to one or", "particular, the instance to be shared across several models that use the same", "applies to multiple columns, these columns will be normalised in the same way", "if len(matchingColumns) == 0: continue for c in matchingColumns: matchedRulesByColumn[c] = rule if", "{col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)} def __setstate__(self, state):", "re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not apply", "= vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info", "\"\"\" :param regex: a regular expression defining the column(s) 
the rule applies to.", "_fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def", "def _fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass", "Exception(\"Attempted to apply a placeholder rule. Perhaps the feature generator from which the", "pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df)", "has no transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF", "to make use of these columns, transform them into a supported column before", "be transformed in the same way. If multiple columns are transformed, then the", "def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\") return d", "way (using the same normalisation process for each column) unless independentColumns=True. If None,", "df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0:", "df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return { \"name\": self.getName(), \"changeInColumnNames\":", "setRegex(self, regex: str): self.regex = re.compile(regex) def matches(self, column: str): if self.regex is", "be set later via setRegex or the rule will not be applicable. 
:param", "raise Exception(f\"The following columns are not handled by any rules: {unhandledColumns}; rules: {',", "return [transf.getName() for transf in self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] =", "= series return df def info(self): info = super().info() info[\"inplace\"] = self.inplace return", "df: pd.DataFrame): matchedRulesByColumn = {} self._rules = [] for rule in self._userRules: matchingColumns", "a boolean function to one of the columns and retaining only the rows", "__init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping old column names to", "an instance only if you want, in particular, the instance to be shared", "that use the same feature with associated rule/rule template (disabling `fit` where appropriate).", "rule applies to :return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer,", "def _toStringExcludePrivate(self) -> bool: return True def getName(self) -> str: \"\"\" :return: the", "list(encodedArray) return df def info(self): info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] =", "= self.keep info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame)", "for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName)", "transformed, they are transformed independently (i.e. 
each column uses a separately trained transformation).", "fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self): return self._isFitted def fitApply(self,", "columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace def _apply(self, df:", "self.oneHotEncoders = None if columns is None: self._columnsToEncode = [] self._columnNameRegex = \"$\"", "in matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More than one rule applies to", "_checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) >", "pd.DataFrame: if not self.inplace: df = df.copy() matchedRulesByColumn = {} for rule in", "self.column info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame", "(forward) transformation is the inverse transformation of this DFT \"\"\" return InverseDataFrameTransformer(self) class", "inverse else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values) else: if", "arrays (of arbitrary lengths). It is assumed that all entries in such arrays", "in cols] lengths = [len(a) for a in flatColArrays] if len(set(lengths)) != 1:", "on the matching column(s) :param unsupported: flag indicating whether normalisation of the matching", "i] else: df[columnName] = list(encodedArray) return df def info(self): info = super().info() info[\"inplace\"]", "return info def findRule(self, colName: str) -> \"DFTNormalisation.Rule\": for rule in self._rules: if", "later via setRegex or the rule will not be applicable. :param skip: flag", "but to one or more array-valued columns, where the values of all arrays", "unsupported according to {rule}. 
If you want to make use of these columns,", "a transformer for all rules that don't specify a particular transformer. The default", "info[\"keep\"] = self.keep info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df:", "info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with counts of the values", "str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def", "from sklearn.preprocessing to (a subset of the columns of) a data frame. If", "df: pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies", "return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary", "by 'column' using 'columnTransform'. 
This transformer can be used to utilise Numpy vectorisation", "= arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def", "a Numpy ufunc that applies to an entire Series \"\"\" super().__init__() self.column =", "False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] = self._paramInfo.get(\"columns\")", "self.column = column self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "will be inferred from the columns :param inplace: whether to perform the transformation", "= super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def", "inplace: whether to perform the transformation in-place :param ignoreUnknown: if True and an", "df.copy() for cg in self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series return df", "columns of the same name containing arrays as values instead of creating a", "= setToKeep self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)]", "decimals=0): super().__init__() self.decimals = decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values,", "values = df[cols].values else: if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values =", "the rows for which it returns True \"\"\" def __init__(self, column: str, vectorizedCondition:", "df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def", "matchedRulesByColumn[c] = rule if len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation of columns", "\"\"\" :return: the name of this dft transformer, which 
may be a default", "def matchingColumns(self, columns: Sequence[str]): return [col for col in columns if self.matches(col)] def", "the column to be modified :param columnTransform: a function operating on single cells", "if self.keep is not None: df = df[self.keep] if self.drop is not None:", "transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False,", "df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data frame by applying a", "\"\"\" def __init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep = setToKeep self.column =", "selected column and retains only the rows for which the value is in", "factory. :param transformerFactory: a factory for the generation of the transformer instance, which", "self._paramInfo = {} # arguments passed to init that are not saved otherwise", "info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo)", "return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass", "Applies a transformer from sklearn.preprocessing to (a subset of the columns of) a", "is not fitted: \" f\"the df transformer {self.getName()} requires fitting\") df = self._apply(df)", "frame - in-place transformation). A data frame transformer may require being fitted using", "to be transformed in the same way. 
If multiple columns are transformed, then", "super().info() info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column", "be inferred from the columns :param inplace: whether to perform the transformation in-place", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer):", "one data frame into another (possibly applying the transformation to the original data", "return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin):", "on the values of all applicable columns) \"\"\" class RuleTemplate: def __init__(self, skip=False,", "transform one data frame into another (possibly applying the transformation to the original", "set of transformation rules, where each rule defines a set of columns to", "the rule's transformer shall be fitted :param independentColumns: whether a separate transformation is", "{cols} do not contain the same number of values: {lengths}\") values = np.stack(flatColArrays,", "following columns are not handled by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\")", "not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which is not fitted: \" f\"the", "instead of creating a separate column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] =", "options. 
:param arrayValued: whether the column values are not scalars but arrays (of", "len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays =", "= self.column info[\"setToKeep\"] = self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data", "columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping old column names to new", "findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers: if isinstance(dft, cls): return dft", "for which the value is not in the setToDrop \"\"\" def __init__(self, column:", "to any columns, transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders", "elif type(columns) == str: self._columnNameRegex = columns self._columnsToEncode = None else: self._columnNameRegex =", "class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters columns by retaining or dropping", "sklearn.preprocessing to (a subset of the columns of) a data frame. If multiple", "be applied if `transformer` is not given; if neither `transformer` nor `transformerInstance` are", "they are transformed independently (i.e. each column uses a separately trained transformation). 
\"\"\"", "self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts", "columns=df.columns, index=df.index) def info(self): info = super().info() info[\"decimals\"] = self.decimals return info class", "[transform(np.stack(row, axis=1)) for row in df.values] for iCol, col in enumerate(cols): df[col] =", "return df if not self.inplace: df = df.copy() for columnName in self._columnsToEncode: encodedArray", "cols = self.columns if cols is None: cols = df.columns transform = (lambda", "-> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None, drop:", "self.requireAllHandled info[\"inplace\"] = self.inplace return info def findRule(self, colName: str) -> \"DFTNormalisation.Rule\": for", "transform(df[cols].values) else: if len(cols) == 1: c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0]", "df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers]) def", "__init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition", "DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column and retains only", ":param arrayValuedResult: whether to replace the input columns by columns of the same", "column with counts of the values on a selected column \"\"\" def __init__(self,", "np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical variables :param", "transformerFactory self.independentColumns = independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert the template to", "in self._columnsToEncode} for columnName in 
self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode)", "the regex must match at most one column. :param fit: whether the rule's", "Filters a data frame by applying a vectorized condition on the selected column", "columns, transform them into a supported column before applying {self.__class__.__name__}.\") if not rule.skip:", "InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame)", "None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip: flag indicating whether", "\" f\"the df transformer {self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df", "setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes()", "df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy() matchedRulesByColumn = {}", "values = applicableDF.values.flatten() values = values.reshape((len(values), 1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule}", "a single column, matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values),", "use of these columns, transform them into a supported column before applying {self.__class__.__name__}.\")", "= super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return", "self.setToKeep = setToKeep self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "in enumerate(cols): df[col] 
= [row[:, iCol] for row in transformedValues] return df def", "drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep is", "not rule.skip: if rule.transformer is None: if rule.transformerFactory is not None: rule.transformer =", "be applied to columns matched by such rules, unmatched columns will not be", "{} self._rules = [] for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c", "flatColArrays] if len(set(lengths)) != 1: raise ValueError(f\"Columns {cols} do not contain the same", "return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToKeep\"] = self.setToKeep", "\"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame) ->", "info = super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer):", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep is not", "self.regex = re.compile(regex) def matches(self, column: str): if self.regex is None: raise Exception(\"Attempted", "to data\"\"\" def _fit(self, df: pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass def", "columns is unsupported (shall trigger an exception if attempted) :param transformer: a transformer", "the rule applies to. 
If it applies to multiple columns, these columns will", "if rule.independentColumns and not rule.arrayValued: matchingColumns = sorted(matchingColumns) df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values) else: for", "None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a", "f\"{rule} matched no columns\") # collect specialised rule for application specialisedRule = copy.copy(rule)", "are transformed, then the arrays belonging to a single row must all have", "of all contained feature generators \"\"\" return [transf.getName() for transf in self.dataFrameTransformers] def", "not self.inplace: df = df.copy() for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if", "independentColumns=False): \"\"\" :param skip: flag indicating whether no transformation shall be performed on", ":param independentColumns: whether a separate transformation is to be learned for each of", "matchingColumns: if c in matchedRulesByColumn: raise Exception(f\"More than one rule applies to column", "None): raise ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex = re.compile(regex) if regex", "self.keep info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) ->", "must all have the same length. 
\"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns", "if not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which is not fitted: \"", "\"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def __len__(self): return", "that don't specify a particular transformer. The default transformer will only be applied", "DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\" Adds a new column with counts of the values on a", "df: pd.DataFrame) -> pd.DataFrame: pass def apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker =", "super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders = None if", "not None\") self.regex = re.compile(regex) if regex is not None else None self.skip", "to all columns :param inplace: whether to apply the transformation in-place :param arrayValued:", "vectorized condition on the selected column and retaining only the rows for which", "is necessary (skip=False, unsupported=False). If None is given, either transformerFactory or the containing", "info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns in ascending order \"\"\"", "to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns,", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep:", "this feature will be all zeros. 
if False, an unknown category will raise", "return df def info(self): return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is", "Optional, Set import numpy as np import pandas as pd from sklearn.preprocessing import", "pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a", "self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's", "the chain receives the transformed output of its predecessor. \"\"\" def __init__(self, *dataFrameTransformers:", "= super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def", "columns: Sequence[str]): return [col for col in columns if self.matches(col)] def __init__(self, rules:", "_toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d", "application of a chain of data frame transformers. During fit and apply each", "transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a regular", "rule matches multiple columns. \"\"\" if skip and (transformer is not None or", "persisted DFTs based on code prior to commit 7088cbbe # They lack the", "a separately trained transformation). 
\"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None,", "= flattenArguments(dataFrameTransformers) def __len__(self): return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for", "that each such DFT was fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\")", "from which the returned value will be assigned to the column as a", "the transformer instance (from sklearn.preprocessing) to use (which will be fitted & applied)", "from ..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate from", "df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info = super().info() info[\"column\"] =", "cg.generateColumn(df) df[series.name] = series return df def info(self): info = super().info() info[\"inplace\"] =", "columnGenerators self.inplace = inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace:", "{}) self.__dict__ = d def _toStringExcludePrivate(self) -> bool: return True def getName(self) ->", "return df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for transformer", "saved otherwise can be persisted here # for backwards compatibility with persisted DFTs", "and retains only the rows for which the value is not in the", "self, state) def _fit(self, df: pd.DataFrame): cols = self.columns if cols is None:", "fit and apply each transformer in the chain receives the transformed output of", "raise Exception(f\"More than one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] =", "only be applied if `transformer` is not given; if neither `transformer` nor `transformerInstance`", "return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param", "to 
apply data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace =", "\"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) ->", "cols = df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x:", "= super().info() info[\"column\"] = self.column return info class DFTRowFilter(RuleBasedDataFrameTransformer): \"\"\" Filters a data", "column self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def", "the creation of transformer instances (from sklearn.preprocessing, e.g. StandardScaler) that shall be used", "len(matchingColumns) == 0: continue for c in matchingColumns: matchedRulesByColumn[c] = rule if not", "df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using", "super().info() info[\"decimals\"] = self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from", "for col, categories in zip(columns, categories)} def __setstate__(self, state): if \"arrayValuedResult\" not in", "matchedRulesByColumn[c] = rule if not rule.skip: if rule.independentColumns and not rule.arrayValued: matchingColumns =", "(lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols]", "values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col", "creation of transformer instances (from sklearn.preprocessing, e.g. 
StandardScaler) that shall be used to", "was never applied in order to have the rule instantiated.\") return self.regex.fullmatch(column) is", "self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which is", "orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception as e: raise Exception(f\"Could not compile", "for the case where a transformation is necessary (skip=False, unsupported=False). If None is", "None, the rule is a placeholder rule and the regex must be set", "mapping column name to array of possible categories for the column name. If", "will be assigned to the column as a whole \"\"\" super().__init__() self.column =", "import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from ..util.pickle import setstate from ..util.string import", "matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for c in matchingColumns: matchedRulesByColumn[c]", "return df def info(self): info = super().info() info[\"inplace\"] = self.inplace return info class", "pd.DataFrame): cols = self.columns if cols is None: cols = df.columns if not", "from ..columngen import ColumnGenerator from ..util import flattenArguments from ..util.pandas import DataFrameColumnChangeTracker from", "from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from", "matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip: if rule.independentColumns and not rule.arrayValued: matchingColumns", "according to {rule}. If you want to make use of these columns, transform", "by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) ->", "instance (from sklearn.preprocessing, e.g. 
StandardScaler) to apply to the matching column(s) for the", "and transformer is not None: raise ValueError(\"skip==True while transformer is not None\") self.skip", "transformer instances (from sklearn.preprocessing, e.g. StandardScaler) that shall be used to create a", "indicating whether no transformation shall be performed on the matching column(s) :param unsupported:", "independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert the template to a rule for", "defines a set of columns to which it applies (learning a single transformer", "columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param sklearnTransformer: the transformer instance (from", "order :param defaultTransformerFactory: a factory for the creation of transformer instances (from sklearn.preprocessing,", "whether normalisation of the matching column(s) is unsupported (shall trigger an exception if", "raise ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex = re.compile(regex) if regex is", "-> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"] = self.column return", "series = cg.generateColumn(df) df[series.name] = series return df def info(self): info = super().info()", "return True class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def", "_fit(self, df: pd.DataFrame): if self._columnsToEncode is None: self._columnsToEncode = [c for c in", "with persisted DFTs based on code prior to commit 7088cbbe # They lack", "else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult", "pd.DataFrame: df = df.copy() if self.keep is not None: df = df[self.keep] if", "1)) rule.transformer.fit(values) else: log.log(logging.DEBUG - 1, f\"{rule} 
matched no columns\") # collect specialised", "of transformer instances (from sklearn.preprocessing, e.g. StandardScaler) that shall be used to create", "{rule}. If you want to make use of these columns, transform them into", "__setstate__(self, state): if \"arrayValuedResult\" not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self)", "the columns and retaining only the rows for which the function returns True", "= None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult", "self._columnsToEncode = [c for c in df.columns if re.fullmatch(self._columnNameRegex, c) is not None]", "where the rule matches multiple columns. \"\"\" if skip and transformer is not", "to create a transformer for all rules that don't specify a particular transformer.", "\"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules = rules self._defaultTransformerFactory =", "transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str],", "in-place :param arrayValued: whether to apply transformation not to scalar-valued columns but to", "with associated rule/rule template (disabling `fit` where appropriate). Otherwise, use a factory. 
:param", "used to create a transformer for all rules that don't specify a particular", "apply it to all columns :param inplace: whether to apply the transformation in-place", "and retains only the rows for which the value is in the setToKeep", "list of names or regex matching names of columns that are to be", "useArrayValues=True); If None, then no columns are actually to be one-hot-encoded :param categories:", "for dft in self.dataFrameTransformers: if isinstance(dft, cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer):", "class for transformers whose logic is entirely based on rules and does not", "Exception(f\"Could not compile regex '{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled:", "that all entries in such arrays are to be normalised in the same", "= columns self.inplace = inplace self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"] =", "rules: the set of rules; rules are always fitted and applied in the", "all arrays within a column (which may vary in length) are to be", "column values are not scalars but arrays (of arbitrary lengths). It is assumed", "= self.decimals return info class DFTSkLearnTransformer(InvertibleDataFrameTransformer): \"\"\" Applies a transformer from sklearn.preprocessing to", "Exception(f\"Cannot apply a DataFrameTransformer which is not fitted: \" f\"the df transformer {self.getName()}", "a data frame on the selected column and retains only the rows for", "same way. If multiple columns are transformed, then the arrays belonging to a", "the columns :param inplace: whether to perform the transformation in-place :param ignoreUnknown: if", "apply a placeholder rule. 
Perhaps the feature generator from which the rule originated", "SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param", "values will be inferred from the columns :param inplace: whether to perform the", "transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules = rules", "= self._userRules return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn = {} self._rules =", "regex: a regular expression defining the column the rule applies to :return: the", "def __init__(self, column: str, condition: Callable[[Any], bool]): super().__init__() self.column = column self.condition =", "case where the rule matches multiple columns. \"\"\" if skip and (transformer is", "True) def info(self): info = super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"]", "all rules that don't specify a particular transformer. The default transformer will only", "_apply(self, df): return self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df, True) def info(self):", "the containing instance's default factory will be used. NOTE: Use an instance only", "info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class", "columns {matchingColumns} is unsupported according to {rule}. If you want to make use", "If you want to make use of these columns, transform them into a", "in df.values] for iCol, col in enumerate(cols): df[col] = [row[:, iCol] for row", "be applicable. 
:param skip: flag indicating whether no transformation shall be performed on", "= self._rules else: d[\"userRules\"] = self._userRules return d def _fit(self, df: pd.DataFrame): matchedRulesByColumn", "\"\"\" Modifies a column specified by 'column' using 'columnTransform' \"\"\" def __init__(self, column:", "apply a DataFrameTransformer which is not fitted: \" f\"the df transformer {self.getName()} requires", "DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False,", "str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation = aggregation def _apply(self, df:", "regex :param regex: a regular expression defining the column the rule applies to", "self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.arrayValued", "- set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The following columns are not handled", "names \"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "= transformerFactory self.arrayValued = arrayValued self.fit = fit self.independentColumns = independentColumns def __setstate__(self,", "str): self.regex = re.compile(regex) def matches(self, column: str): if self.regex is None: raise", "class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column and retains", "handle_unknown=self.handleUnknown) for col, categories in categories.items()} else: if len(columns) != len(categories): raise ValueError(f\"Given", "pass def apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not self.isFitted():", "retaining only the rows for which it returns True \"\"\" def __init__(self, column:", 
"self.handleUnknown info[\"arrayValuedResult\"] = self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer", "(learning a single transformer based on the values of all applicable columns) \"\"\"", "self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame: series = df[self.columnForEntryCount].value_counts() return", "if len(columns) != len(categories): raise ValueError(f\"Given categories must have the same length as", "'{c}': {matchedRulesByColumn[c]}, {rule}\") matchedRulesByColumn[c] = rule if len(matchingColumns) > 0: if rule.unsupported: raise", "transformation in-place :param arrayValued: whether to apply transformation not to scalar-valued columns but", "True) d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d def", "raise Exception(\"Attempted to apply a placeholder rule. Perhaps the feature generator from which", "self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self): return { \"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if", "is None: self._columnsToEncode = [c for c in df.columns if re.fullmatch(self._columnNameRegex, c) is", "names of all contained feature generators \"\"\" return [transf.getName() for transf in self.dataFrameTransformers]", "skip and (transformer is not None or transformerFactory is not None): raise ValueError(\"skip==True", "in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info = super().info() info[\"requireAllHandled\"] =", "= columns self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode = columns self.inplace", "if neither `transformer` nor `transformerInstance` are given, the containing instance's default factory will", "rule matches multiple columns. 
\"\"\" if skip and transformer is not None: raise", "def info(self): info = super().info() info[\"columns\"] = self.columns info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] =", "def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod", "is None: raise Exception(\"Attempted to apply a placeholder rule. Perhaps the feature generator", "> 0: raise Exception(f\"The following columns are not handled by any rules: {unhandledColumns};", "columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters", "for rule in self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for", "categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode", "a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to the matching column(s)", "return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if", "class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform'. This transformer", "optimisation. \"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param", "column name to array of possible categories for the column name. 
If None,", "super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "(for case where sequence is specified in 'columns') or dictionary mapping column name", "col in enumerate(cols): df[col] = [row[:, iCol] for row in transformedValues] return df", "ABC): @abstractmethod def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\":", "exception if attempted) :param transformer: a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to", "from the columns :param inplace: whether to perform the transformation in-place :param ignoreUnknown:", "receives the transformed output of its predecessor. \"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]):", "> 0: if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported according to", "if rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued case is only supported for", "are not handled by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self,", "class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected column and retains", ":param categories: numpy arrays containing the possible values of each of the specified", "`transformerInstance` are given, the containing instance's default factory will be used. See `SkLearnTransformerFactoryFactory`", "arbitrary lengths). 
It is assumed that all entries in such arrays are to", "factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit transformer applicableDF = df[sorted(matchingColumns)] if", "self.inplace: df = df.copy() for cg in self.columnGenerators: series = cg.generateColumn(df) df[series.name] =", "decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def", "skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class Rule(ToStringMixin): def __init__(self,", "pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by", "np import pandas as pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol", "transformation to the original data frame - in-place transformation). 
A data frame transformer", "class InverseDataFrameTransformer(RuleBasedDataFrameTransformer): def __init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df:", "as np import pandas as pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import", "regex: Optional[str]): \"\"\" Convert the template to a rule for all columns matching", "\"ignore\" if ignoreUnknown else \"error\" if categories is not None: if type(categories) ==", "shall be fitted :param independentColumns: whether a separate transformation is to be learned", "the column the rule applies to :return: the resulting Rule \"\"\" return DFTNormalisation.Rule(regex,", "generators \"\"\" return [transf.getName() for transf in self.dataFrameTransformers] def info(self): info = super().info()", "= requireAllHandled self.inplace = inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules =", "colName: str) -> \"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName): return rule class", "Sequence[str]] = None): super().__init__() self.keep = [keep] if type(keep) == str else keep", "returns True \"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column =", "-> \"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def", "rule if len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is", "Set = None, drop: Set = None): super().__init__() self.drop = drop self.keep =", "self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series return df def info(self): info =", "each rule defines a set of columns to which it applies (learning a", "rule if not rule.skip: if rule.independentColumns and not rule.arrayValued: 
matchingColumns = sorted(matchingColumns) df[matchingColumns]", "category is encountered during transform, the resulting one-hot encoded columns for this feature", "not scalars but arrays (of arbitrary lengths). It is assumed that all entries", "If multiple columns are transformed, they are transformed independently (i.e. each column uses", "expression defining the column(s) the rule applies to. If it applies to multiple", "None\") self.regex = re.compile(regex) if regex is not None else None self.skip =", "DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation = columnForAggregation self.aggregation =", "self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None, \"isFitted\": self.isFitted(), }", "the application of a chain of data frame transformers. During fit and apply", "new names \"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "NOTE: Use an instance only if you want, in particular, the instance to", "(a subset of the columns of) a data frame. 
If multiple columns are", "the case where useArrayValues=True); If None, then no columns are actually to be", "will only be applied to columns matched by such rules, unmatched columns will", "rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise Exception(f\"No transformer to fit: {rule} defines", "pandas as pd from sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen", "e: raise Exception(f\"Could not compile regex '{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn):", "return self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df, True) def info(self): info =", "self.setToDrop = setToDrop self.column = column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "into a supported column before applying {self.__class__.__name__}.\") if not rule.skip: if rule.transformer is", "enumerate(cols): df[col] = [row[:, iCol] for row in transformedValues] return df def _apply(self,", ":param unsupported: flag indicating whether normalisation of all columns is unsupported (shall trigger", "return self.regex.fullmatch(column) is not None def matchingColumns(self, columns: Sequence[str]): return [col for col", "d[\"_columnChangeTracker\"] = d.get(\"_columnChangeTracker\", None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d def _toStringExcludePrivate(self)", "-> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class DFTRoundFloats(RuleBasedDataFrameTransformer): def __init__(self, decimals=0): super().__init__() self.decimals = decimals", "using 'columnTransform'. This transformer can be used to utilise Numpy vectorisation for performance", "(i.e. each column uses a separately trained transformation). 
\"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol,", "= self.setToKeep return info class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the", "transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft", "to which the transformation shall apply; if None, apply it to all columns", "pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info = super().info() info[\"decimals\"] =", "for which it returns True \"\"\" def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]):", "to be normalised in the same way. If arrayValued is True, only a", "Base class for data frame transformers, i.e. objects which can transform one data", "whether to perform the transformation in-place :param ignoreUnknown: if True and an unknown", "None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str): self.regex = re.compile(regex)", "not in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d", "if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported according to {rule}. 
If", "applicableDF = df[sorted(matchingColumns)] if rule.arrayValued: if len(matchingColumns) > 1: raise Exception(f\"Array-valued case is", "-> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the", "\"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False, arrayValued=False): \"\"\" :param", "not None: df = df[self.keep] if self.drop is not None: df = df.drop(columns=self.drop)", "str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop self.column = column def _apply(self, df:", "supported for a single column, matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values", "to array of possible categories for the column name. If None, the possible", "Sequence[str]] = None, drop: Union[str, Sequence[str]] = None): super().__init__() self.keep = [keep] if", "super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount = columnForEntryCount def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep is", "= df[cols].values else: if len(cols) == 1: values = np.concatenate(df[cols[0]].values.flatten()) values = values.reshape((len(values),", "logic is entirely based on rules and does not need to be fitted", "columns of) a data frame. If multiple columns are transformed, they are transformed", "data frame transformer may require being fitted using training data. \"\"\" def __init__(self):", "columns matching the regex :param regex: a regular expression defining the column the", "info = super().info() info[\"keep\"] = self.keep info[\"drop\"] = self.drop return info class DFTKeepColumns(DFTColumnFilter):", "is unsupported according to {rule}. 
If you want to make use of these", "def getNames(self) -> List[str]: \"\"\" :return: the list of names of all contained", "1)) elif rule.independentColumns: values = applicableDF.values else: values = applicableDF.values.flatten() values = values.reshape((len(values),", "the same normalisation process for each column) unless independentColumns=True. If None, the rule", "regex is not None else None self.skip = skip self.unsupported = unsupported self.transformer", "Filters a data frame on the selected column and retains only the rows", "is not None def matchingColumns(self, columns: Sequence[str]): return [col for col in columns", "a factory for the generation of the transformer instance, which will only be", "self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} # arguments passed", "columnForAggregation self.aggregation = aggregation def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.groupby(self.columnForAggregation).agg(self.aggregation) class", "self.regex.fullmatch(column) is not None def matchingColumns(self, columns: Sequence[str]): return [col for col in", "inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace = inplace def _apply(self, df: pd.DataFrame) ->", "True \"\"\" def __init__(self, column: str, condition: Callable[[Any], bool]): super().__init__() self.column = column", "df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]] =", "or more array-valued columns, where the values of all arrays within a column", "-> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a chain", "pd.DataFrame: pass def apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker = DataFrameColumnChangeTracker(df) if not", "only supported for a 
single column, matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten())", "handled by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame)", "DataFrameColumnChangeTracker(df) if not self.isFitted(): raise Exception(f\"Cannot apply a DataFrameTransformer which is not fitted:", "-> pd.DataFrame: if not self.inplace: df = df.copy() matchedRulesByColumn = {} for rule", "this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose", "whether to apply transformation not to scalar-valued columns but to one or more", "requireAllHandled self.inplace = inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None", "transformer to fit: {rule} defines no transformer and instance has no transformer factory\")", "self.requireAllHandled: unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The following", "column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] = categories is", "rules and does not need to be fitted to data\"\"\" def _fit(self, df:", "DFT was fitted def __setstate__(self, d): d[\"_name\"] = d.get(\"_name\", f\"{self.__class__.__name__}-{id(self)}\") d[\"_isFitted\"] = d.get(\"_isFitted\",", "`transformer` is not given; if neither `transformer` nor `transformerInstance` are given, the containing", "= super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return info class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\"", "the set of column names to which the transformation shall apply; if None,", "If None, the rule is a placeholder rule and the regex must be", "rule in self._rules: matchingColumns = 
rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for c", "= len(self) return info def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]: for dft in self.dataFrameTransformers:", "where the values of all arrays within a column (which may vary in", "in the chain receives the transformed output of its predecessor. \"\"\" def __init__(self,", "fitted to data\"\"\" def _fit(self, df: pd.DataFrame): pass def fit(self, df: pd.DataFrame): pass", "applies (learning a single transformer based on the values of all applicable columns)", "generation of the transformer instance, which will only be applied if `transformer` is", "self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df = df.copy()", "transformer is not None: raise ValueError(\"skip==True while transformer is not None\") self.skip =", "self.decimals), columns=df.columns, index=df.index) def info(self): info = super().info() info[\"decimals\"] = self.decimals return info", "for row in df.values] for iCol, col in enumerate(cols): df[col] = [row[:, iCol]", "rule.transformerFactory is not None: rule.transformer = rule.transformerFactory() else: if self._defaultTransformerFactory is None: raise", "while transformer/transformerFactory is not None\") self.regex = re.compile(regex) if regex is not None", "self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace = inplace self.arrayValued = arrayValued def", "the case where the rule matches multiple columns. \"\"\" if skip and transformer", "x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df def info(self): info = super().info() info[\"requireAllHandled\"]", "not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in", "be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param independentColumns: whether a separate", "matching column(s) is unsupported (shall trigger an exception if attempted) :param transformer: a", "None: raise Exception(\"Attempted to apply a placeholder rule. Perhaps the feature generator from", "transformer instance (from sklearn.preprocessing) to use (which will be fitted & applied) :param", "self._columnChangeTracker is not None else None, \"isFitted\": self.isFitted(), } def fit(self, df: pd.DataFrame):", "= self.columns if cols is None: cols = df.columns transform = (lambda x:", "state) def _fit(self, df: pd.DataFrame): cols = self.columns if cols is None: cols", ":param rules: the set of rules; rules are always fitted and applied in", "def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return:", "columns will be normalised in the same way (using the same normalisation process", "def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol]", "= [np.concatenate(df[col].values.flatten()) for col in cols] lengths = [len(a) for a in flatColArrays]", "-> \"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose (forward) transformation is the inverse transformation", "str]): \"\"\" :param columnsMap: dictionary mapping old column names to new names \"\"\"", "@abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "str: \"\"\" :return: the name of this dft transformer, which may be a", "= self._paramInfo.get(\"columns\") return d def _fit(self, df: pd.DataFrame): if self._columnsToEncode is None: self._columnsToEncode", "SkLearnTransformerProtocol] = None, arrayValued=False, fit=True, independentColumns=False): \"\"\" :param regex: a regular expression defining", "to one of the columns and retaining only the rows for which the", "df = 
df[self.keep] if self.drop is not None: df = df.drop(columns=self.drop) return df", "multiple columns are transformed, they are transformed independently (i.e. each column uses a", "passed to init that are not saved otherwise can be persisted here #", "self.dataFrameTransformers: if isinstance(dft, cls): return dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap:", "by a list one-hot encoded columns each (or an array-valued column for the", "-> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info = super().info() info[\"column\"] = self.column info[\"setToDrop\"]", "c in matchedRulesByColumn: raise Exception(f\"More than one rule applies to column '{c}': {matchedRulesByColumn[c]},", "if not self.arrayValued: df[cols] = transform(df[cols].values) else: if len(cols) == 1: c =", "raise an error. :param arrayValuedResult: whether to replace the input columns by columns", "matching column(s) for the case where a transformation is necessary (skip=False, unsupported=False). If", "self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName) for i in range(encodedArray.shape[1]): df[\"%s_%d\" %", "values are not scalars but arrays (of arbitrary lengths). 
It is assumed that", "is None: cols = df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else", "\"\"\" Sorts a data frame's columns in ascending order \"\"\" def _apply(self, df:", "creating a separate column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"]", "indicating whether no transformation shall be performed on all of the columns :param", "df transformer {self.getName()} requires fitting\") df = self._apply(df) self._columnChangeTracker.trackChange(df) return df def info(self):", "data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules", "columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in", "default factory will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. :param independentColumns:", "value is not in the setToDrop \"\"\" def __init__(self, column: str, setToDrop: Set):", "only the rows for which the value is not in the setToDrop \"\"\"", "data frame's columns in ascending order \"\"\" def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "is True, only a single matching column is supported, i.e. 
the regex must", "self._name = name @abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self, df:", "performed on the matching column(s) :param unsupported: flag indicating whether normalisation of the", "= logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data frame transformers, i.e.", "vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition def _apply(self, df:", "{', '.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df", "super().info() info[\"requireAllHandled\"] = self.requireAllHandled info[\"inplace\"] = self.inplace return info def findRule(self, colName: str)", "class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False): super().__init__() self.columnGenerators = columnGenerators self.inplace =", "= None self._paramInfo = {} # arguments passed to init that are not", "sklearnTransformer self.columns = columns self.inplace = inplace self.arrayValued = arrayValued def __setstate__(self, state):", "matching the regex :param regex: a regular expression defining the column the rule", "a data frame by applying a boolean function to one of the columns", "newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"] def", "a default name if the name has not been set. 
\"\"\" return self._name", "self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\" if", "transformer self.transformerFactory = transformerFactory self.arrayValued = arrayValued self.fit = fit self.independentColumns = independentColumns", "in self.columnGenerators: series = cg.generateColumn(df) df[series.name] = series return df def info(self): info", "is not None: df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling", "Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition = vectorizedCondition def _apply(self, df: pd.DataFrame)", "columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame): if len(self._columnsToEncode) == 0: return", "for each of the columns for the case where the rule matches multiple", "column names to which the transformation shall apply; if None, apply it to", "pd.DataFrame: if not self.inplace: df = df.copy() for cg in self.columnGenerators: series =", "True def isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return", "values on a selected column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str =", "data frame into another (possibly applying the transformation to the original data frame", "self.regex is None: raise Exception(\"Attempted to apply a placeholder rule. Perhaps the feature", "RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose logic is entirely based on rules", "options. 
:param independentColumns: whether a separate transformation is to be learned for each", "state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"]", "not None def matchingColumns(self, columns: Sequence[str]): return [col for col in columns if", "def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]): super().__init__() self.column = column self.vectorizedCondition =", "may vary in length) are to be transformed in the same way. If", "str) -> \"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer):", "each (or an array-valued column for the case where useArrayValues=True); If None, then", "defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the set of rules; rules are always", "same feature with associated rule/rule template (disabling `fit` where appropriate). Otherwise, use a", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers: df = transformer.apply(df)", "any columns, transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders =", "-> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index) def info(self): info = super().info() info[\"decimals\"]", "given, the containing instance's default factory will be used. See `SkLearnTransformerFactoryFactory` for convenient", ":param arrayValued: whether to apply transformation not to scalar-valued columns but to one", "way. 
If multiple columns are transformed, then the arrays belonging to a single", "df.copy() cols = self.columns if cols is None: cols = df.columns transform =", "is in the setToKeep \"\"\" def __init__(self, column: str, setToKeep: Set): super().__init__() self.setToKeep", "in 'columns') or dictionary mapping column name to array of possible categories for", "column for the case where useArrayValues=True); If None, then no columns are actually", "pd.DataFrame): if len(self._columnsToEncode) == 0: return df if not self.inplace: df = df.copy()", "columns, these columns will be normalised in the same way (using the same", "name @abstractmethod def _fit(self, df: pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame) ->", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\"", "-> pd.DataFrame: if not self.inplace: df = df.copy() cols = self.columns if cols", "regular expression defining the column the rule applies to :return: the resulting Rule", "as e: raise Exception(f\"Could not compile regex '{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df,", "rule.transformer.transform(df[[c]].values) else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return", "on all of the columns :param unsupported: flag indicating whether normalisation of all", ":param skip: flag indicating whether no transformation shall be performed on the matching", "Callable[[Any], bool]): super().__init__() self.column = column self.condition = condition def _apply(self, df: pd.DataFrame)", "a factory for the creation of transformer instances (from sklearn.preprocessing, e.g. StandardScaler) that", "factory will be used. See `SkLearnTransformerFactoryFactory` for convenient construction options. 
:param arrayValued: whether", "defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() if", "frame's columns in ascending order \"\"\" def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules: the set of", "transformerFactory or the containing instance's default factory will be used. NOTE: Use an", "bool]): super().__init__() self.column = column self.condition = condition def _apply(self, df: pd.DataFrame) ->", "return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using", "def _fit(self, df: pd.DataFrame): cols = self.columns if cols is None: cols =", "True and an unknown category is encountered during transform, the resulting one-hot encoded", "dft in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return: the list of names", "= condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\"", "False) def applyInverse(self, df): return self._apply_transformer(df, True) def info(self): info = super().info() info[\"columns\"]", "= None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip: flag indicating", "for all rules that don't specify a particular transformer. 
The default transformer will", "__init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"): super().__init__() self.columnNameForResultingCounts = columnNameForResultingCounts self.columnForEntryCount =", "self.decimals = decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns,", "columns if self.matches(col)] def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False): \"\"\" :param rules:", "columns: the set of column names to which the transformation shall apply; if", "on code prior to commit 7088cbbe # They lack the __isFitted attribute and", "\"\"\" return self._name def setName(self, name): self._name = name @abstractmethod def _fit(self, df:", "= inplace self.arrayValued = arrayValued def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer,", "- 1, f\"{rule} matched no columns\") # collect specialised rule for application specialisedRule", "is not None\") self.regex = re.compile(regex) if regex is not None else None", "and retaining only the rows for which the function returns True \"\"\" def", ":param regex: a regular expression defining the column the rule applies to :return:", "= arrayValuedResult self.handleUnknown = \"ignore\" if ignoreUnknown else \"error\" if categories is not", "of the column to be modified :param columnTransform: a function operating on single", "is None self.oneHotEncoders = None if columns is None: self._columnsToEncode = [] self._columnNameRegex", "apply transformation not to scalar-valued columns but to one or more array-valued columns,", "if cols is None: cols = df.columns if not self.arrayValued: values = df[cols].values", "construction options. 
:param arrayValued: whether the column values are not scalars but arrays", "self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return", "on a selected column \"\"\" def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = \"counts\"):", "values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values) def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame:", "an unknown category will raise an error. :param arrayValuedResult: whether to replace the", "transformer has no effect; regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())],", "but arrays (of arbitrary lengths). It is assumed that all entries in such", "is not None: df = df.loc[self.keep] if self.drop is not None: df =", "self.toRule(None) class Rule(ToStringMixin): def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None,", "of the column to be modified :param columnTransform: a function that takes a", ":param fit: whether the rule's transformer shall be fitted :param independentColumns: whether a", "the arrays belonging to a single row must all have the same length.", "instance, which will only be applied if `transformer` is not given; if neither", "same length as columns to process\") self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for", "inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self) ->", "__init__(self, decimals=0): super().__init__() self.decimals = decimals def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] 
class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data", "= inplace self._userRules = rules self._defaultTransformerFactory = defaultTransformerFactory self._rules = None def _toStringAdditionalEntries(self)", "== 1: c = cols[0] df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]]", "all contained feature generators \"\"\" return [transf.getName() for transf in self.dataFrameTransformers] def info(self):", "= df.columns transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x)", "the same name containing arrays as values instead of creating a separate column", "= orRegexGroup(matchingColumns) try: specialisedRule.regex = re.compile(r) except Exception as e: raise Exception(f\"Could not", "set(matchedRulesByColumn.keys()) if len(unhandledColumns) > 0: raise Exception(f\"The following columns are not handled by", "data frame by applying a boolean function to one of the columns and", "not given; if neither `transformer` nor `transformerInstance` are given, the containing instance's default", "if len(unhandledColumns) > 0: raise Exception(f\"The following columns are not handled by any", "is not None else None self.skip = skip self.unsupported = unsupported self.transformer =", ":param defaultTransformerFactory: a factory for the creation of transformer instances (from sklearn.preprocessing, e.g.", "i)] = encodedArray[:, i] else: df[columnName] = list(encodedArray) return df def info(self): info", "regex '{r}': {e}\") self._rules.append(specialisedRule) def _checkUnhandledColumns(self, df, matchedRulesByColumn): if self.requireAllHandled: unhandledColumns = set(df.columns)", "super()._toStringAdditionalEntries() if self._rules is not None: d[\"rules\"] = self._rules else: d[\"userRules\"] = self._userRules", "`SkLearnTransformerFactoryFactory` for convenient construction options. 
:param independentColumns: whether a separate transformation is to", "which the transformation shall apply; if None, apply it to all columns :param", "{rule} defines no transformer and instance has no transformer factory\") rule.transformer = self._defaultTransformerFactory()", "regex='{self._columnNameRegex}'\") if self.oneHotEncoders is None: self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column", "row in df.values] for iCol, col in enumerate(cols): df[col] = [row[:, iCol] for", "Modifies a column specified by 'column' using 'columnTransform' \"\"\" def __init__(self, column: str,", "df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep is not None: df", "transformer.apply(df) return df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) == 0: return for", "DFTKeepColumns(DFTColumnFilter): def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self,", "such arrays are to be normalised in the same way. If arrayValued is", "to apply a placeholder rule. Perhaps the feature generator from which the rule", "transformer is not None\") self.skip = skip self.unsupported = unsupported self.transformer = transformer", "have the same length. \"\"\" super().__init__() self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns", "None: df = df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a", "in self.dataFrameTransformers: df = transformer.apply(df) return df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers)", "rules, unmatched columns will not be transformed. 
:param requireAllHandled: whether to raise an", "retaining only the rows for which it returns True \"\"\" def __init__(self, condition:", "'.join(map(str, self._rules))}\") def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df =", "\"name\": self.getName(), \"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None, \"isFitted\": self.isFitted(),", "scalars but arrays (of arbitrary lengths). It is assumed that all entries in", "= df[self.column].apply(self.columnTransform) return df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column'", "RuleTemplate: def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] =", "if rule.transformer is None: if rule.transformerFactory is not None: rule.transformer = rule.transformerFactory() else:", "self._columnsToEncode = [] self._columnNameRegex = \"$\" elif type(columns) == str: self._columnNameRegex = columns", "rule for all columns matching the regex :param regex: a regular expression defining", "sklearn.preprocessing) to use (which will be fitted & applied) :param columns: the set", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application", "series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return", "row and retaining only the rows for which it returns True \"\"\" def", "DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a chain of data frame transformers. 
During", "class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a data frame's columns in ascending order \"\"\" def", "column, matched {matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif", "original data frame - in-place transformation). A data frame transformer may require being", "super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts info[\"columnForEntryCount\"] = self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self,", "its predecessor. \"\"\" def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]): super().__init__() self.dataFrameTransformers = flattenArguments(dataFrameTransformers) def", "the resulting one-hot encoded columns for this feature will be all zeros. if", "to raise an exception if not all columns are matched by a rule", "raise Exception(f\"No transformer to fit: {rule} defines no transformer and instance has no", "df class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform'. 
This", "column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].isin(self.setToKeep)] def info(self): info =", "return len(self.dataFrameTransformers) def _apply(self, df: pd.DataFrame) -> pd.DataFrame: for transformer in self.dataFrameTransformers: df", "is specified in 'columns') or dictionary mapping column name to array of possible", ":param skip: flag indicating whether no transformation shall be performed on all of", "by 'column' using 'columnTransform' \"\"\" def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\"", "are not saved otherwise can be persisted here # for backwards compatibility with", "= transform(df[cols].values) else: if len(cols) == 1: c = cols[0] df[c] = [transform(np.array([x]).T)[:,", ":param requireAllHandled: whether to raise an exception if not all columns are matched", "__init__(self, invertibleDFT: InvertibleDataFrameTransformer): super().__init__() self.invertibleDFT = invertibleDFT def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "be learned for each of the columns for the case where the rule", "input columns by columns of the same name containing arrays as values instead", "unsupported (shall trigger an exception if attempted) :param transformer: a transformer instance (from", "info(self): info = super().info() info[\"column\"] = self.column info[\"setToDrop\"] = self.setToDrop return info class", "def isFitted(self): return self._isFitted def fitApply(self, df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df)", "pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on the selected", "if len(matchingColumns) > 1: raise Exception(f\"Array-valued case is only supported for a single", "frame transformer may require being fitted using training data. 
\"\"\" def __init__(self): self._name", "if re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does not", "to. If it applies to multiple columns, these columns will be normalised in", "specify a particular transformer. The default transformer will only be applied to columns", "setRegex or the rule will not be applicable. :param skip: flag indicating whether", "transformer and instance has no transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: #", "Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} # arguments passed to init that are", "None) d[\"_paramInfo\"] = d.get(\"_paramInfo\", {}) self.__dict__ = d def _toStringExcludePrivate(self) -> bool: return", "rule.transformer is None: if rule.transformerFactory is not None: rule.transformer = rule.transformerFactory() else: if", "array of possible categories for the column name. If None, the possible values", "values = values.reshape((len(values), 1)) else: flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols] lengths", "array and from which the returned value will be assigned to the column", "self.condition = condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class", "return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"] = self.columnNameForResultingCounts", "name to array of possible categories for the column name. 
If None, the", ":param columnTransform: a function operating on single cells or a Numpy ufunc that", "self.inplace return info def findRule(self, colName: str) -> \"DFTNormalisation.Rule\": for rule in self._rules:", "pd.DataFrame: pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose (forward) transformation", "Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical", "def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def apply(self, df: pd.DataFrame) -> pd.DataFrame:", "not None else None self.skip = skip self.unsupported = unsupported self.transformer = transformer", "always fitted and applied in the given order :param defaultTransformerFactory: a factory for", "instance has no transformer factory\") rule.transformer = self._defaultTransformerFactory() if rule.fit: # fit transformer", "{matchingColumns} for {rule}\") values = np.concatenate(applicableDF.values.flatten()) values = values.reshape((len(values), 1)) elif rule.independentColumns: values", "for which it returns True \"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition", "__setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame):", "The default transformer will only be applied to columns matched by such rules,", "def matches(self, column: str): if self.regex is None: raise Exception(\"Attempted to apply a", "for column in self._columnsToEncode} for columnName in self._columnsToEncode: self.oneHotEncoders[columnName].fit(df[[columnName]]) def _apply(self, df: pd.DataFrame):", "not saved otherwise can be persisted here # for backwards compatibility with persisted", "transformed independently (i.e. each column uses a separately trained transformation). 
\"\"\" def __init__(self,", "condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters", "super().__init__() self.drop = drop self.keep = keep def _apply(self, df: pd.DataFrame) -> pd.DataFrame:", "pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set = None, drop: Set", "state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df: pd.DataFrame): cols", "== dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()}", "} def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self): return self._isFitted", "columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]]) if not self.arrayValuedResult: df = df.drop(columns=columnName) for", "or transformerFactory is not None): raise ValueError(\"skip==True while transformer/transformerFactory is not None\") self.regex", "a factory. 
:param transformerFactory: a factory for the generation of the transformer instance,", "applying a vectorized condition on the selected column and retaining only the rows", "raise ValueError(f\"Given categories must have the same length as columns to process\") self.oneHotEncoders", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[self.keep] class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer): def __init__(self, keep: Set", "df[columnName] = list(encodedArray) return df def info(self): info = super().info() info[\"inplace\"] = self.inplace", "df: pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self,", "the rule originated was never applied in order to have the rule instantiated.\")", "else None self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory =", "isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self) -> List[str]: \"\"\" :return:", "inplace: whether to apply data frame transformations in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled", "flag indicating whether no transformation shall be performed on all of the columns", "= column def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[~df[self.column].isin(self.setToDrop)] def info(self): info", "d = super()._toStringAdditionalEntries() if self.regex is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d", "to utilise Numpy vectorisation for performance optimisation. 
\"\"\" def __init__(self, column: str, columnTransform:", "a whole \"\"\" super().__init__() self.column = column self.columnTransform = columnTransform def _apply(self, df:", "return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str, Sequence[str]]], categories: Union[List[np.ndarray], Dict[str, np.ndarray]]", "None: df = df.drop(columns=self.drop) return df def info(self): info = super().info() info[\"keep\"] =", "is entirely based on rules and does not need to be fitted to", "__init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo =", "else: for c in matchingColumns: if not rule.arrayValued: df[c] = rule.transformer.transform(df[[c]].values) else: df[c]", "[keep] if type(keep) == str else keep self.drop = drop def _apply(self, df:", "self.dataFrameTransformers: df = transformer.apply(df) return df def _fit(self, df: pd.DataFrame): if len(self.dataFrameTransformers) ==", "abstractmethod from typing import List, Sequence, Union, Dict, Callable, Any, Optional, Set import", "= False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d = super()._toStringAdditionalEntries() d[\"columns\"] =", "df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]] else: transformedValues = [transform(np.stack(row, axis=1))", "data frame transformers, i.e. objects which can transform one data frame into another", "cols = self.columns if cols is None: cols = df.columns if not self.arrayValued:", "info(self): info = super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return info def", "the rule matches multiple columns. \"\"\" if skip and transformer is not None:", "name if the name has not been set. 
\"\"\" return self._name def setName(self,", "Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None, transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, arrayValued=False,", "a column (which may vary in length) are to be transformed in the", "\"\"\" return [transf.getName() for transf in self.dataFrameTransformers] def info(self): info = super().info() info[\"chainedDFTTransformerNames\"]", "def __setstate__(self, state): state[\"arrayValued\"] = state.get(\"arrayValued\", False) setstate(DFTSkLearnTransformer, self, state) def _fit(self, df:", "transformers. During fit and apply each transformer in the chain receives the transformed", "for transformer in self.dataFrameTransformers: df = transformer.apply(df) return df def _fit(self, df: pd.DataFrame):", "orRegexGroup(columns) self._columnsToEncode = columns self.inplace = inplace self.arrayValuedResult = arrayValuedResult self.handleUnknown = \"ignore\"", "transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() + [\"regex\"] def _toStringAdditionalEntries(self) -> Dict[str,", "columns matched by such rules, unmatched columns will not be transformed. 
:param requireAllHandled:", "transformers whose logic is entirely based on rules and does not need to", "super().__init__() self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df.rename(columns=self.columnsMap) class", "super().info() info[\"chainedDFTTransformerNames\"] = self.getNames() info[\"length\"] = len(self) return info def findFirstTransformerByType(self, cls) ->", "0: continue for c in matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip: if", "pd.DataFrame: return df.rename(columns=self.columnsMap) class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame by applying a", "return df def _apply(self, df): return self._apply_transformer(df, False) def applyInverse(self, df): return self._apply_transformer(df,", "the same way. If arrayValued is True, only a single matching column is", "where sequence is specified in 'columns') or dictionary mapping column name to array", "frame by applying a boolean function to one of the columns and retaining", "is not None: if type(categories) == dict: self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown)", ":param columns: the set of column names to which the transformation shall apply;", "return for transformer in self.dataFrameTransformers[:-1]: df = transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted()", "pd.DataFrame: for transformer in self.dataFrameTransformers: df = transformer.apply(df) return df def _fit(self, df:", "indicating whether normalisation of all columns is unsupported (shall trigger an exception if", "df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self,", "if skip and transformer is not None: raise ValueError(\"skip==True while transformer is not", "self, state, 
newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False, transformerFactory=None)) def _toStringExcludes(self) -> List[str]: return super()._toStringExcludes() +", "rule. Perhaps the feature generator from which the rule originated was never applied", "(disabling `fit` where appropriate). Otherwise, use a factory. :param transformerFactory: a factory for", "dft return None class DFTRenameColumns(RuleBasedDataFrameTransformer): def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap:", "it returns True \"\"\" def __init__(self, condition: Callable[[Any], bool]): super().__init__() self.condition = condition", "self._paramInfo[\"inferCategories\"] = categories is None self.oneHotEncoders = None if columns is None: self._columnsToEncode", "sklearn.preprocessing import OneHotEncoder from .sklearn_transformer import SkLearnTransformerProtocol from ..columngen import ColumnGenerator from ..util", ":param transformerFactory: a factory for the generation of the transformer instance, which will", "= df.drop(self.drop) return df class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data frame", "= df.copy() if self.keep is not None: df = df.loc[self.keep] if self.drop is", "the selected column and retains only the rows for which the value is", "expression defining the column the rule applies to :return: the resulting Rule \"\"\"", "= None, inplace=False, ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical variables :param columns:", "\"\"\" Filters a data frame on the selected column and retains only the", "the column values are not scalars but arrays (of arbitrary lengths). 
It is", "raise an exception if not all columns are matched by a rule :param", "\"DFTNormalisation.Rule\": for rule in self._rules: if rule.matches(colName): return rule class DFTFromColumnGenerators(RuleBasedDataFrameTransformer): def __init__(self,", "d def setRegex(self, regex: str): self.regex = re.compile(regex) def matches(self, column: str): if", "inferred from the columns :param inplace: whether to perform the transformation in-place :param", "the setToDrop \"\"\" def __init__(self, column: str, setToDrop: Set): super().__init__() self.setToDrop = setToDrop", "pass def getInverse(self) -> \"InverseDataFrameTransformer\": \"\"\" :return: a transformer whose (forward) transformation is", "else keep self.drop = drop def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df =", ":param unsupported: flag indicating whether normalisation of the matching column(s) is unsupported (shall", "\"\"\" def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]): \"\"\" :param column:", "else: df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]] self._checkUnhandledColumns(df, matchedRulesByColumn) return df", "iCol] for row in transformedValues] return df def _apply(self, df): return self._apply_transformer(df, False)", "may require being fitted using training data. \"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\"", "class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a chain of data frame transformers.", "way. If arrayValued is True, only a single matching column is supported, i.e.", "convenient construction options. 
:param arrayValued: whether the column values are not scalars but", "== str: self._columnNameRegex = columns self._columnsToEncode = None else: self._columnNameRegex = orRegexGroup(columns) self._columnsToEncode", "matched no columns\") # collect specialised rule for application specialisedRule = copy.copy(rule) r", "info[\"inplace\"] = self.inplace return info def findRule(self, colName: str) -> \"DFTNormalisation.Rule\": for rule", "\"\"\" :param columnsMap: dictionary mapping old column names to new names \"\"\" super().__init__()", "x in df[c]] else: transformedValues = [transform(np.stack(row, axis=1)) for row in df.values] for", "= encodedArray[:, i] else: df[columnName] = list(encodedArray) return df def info(self): info =", "= unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns def toRule(self,", "is not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str): self.regex", "in categories.items()} else: if len(columns) != len(categories): raise ValueError(f\"Given categories must have the", "= False self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None self._paramInfo = {} # arguments passed to", "is not None\") self.skip = skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory", "pd.DataFrame) -> pd.DataFrame: self.fit(df) return self.apply(df) class InvertibleDataFrameTransformer(DataFrameTransformer, ABC): @abstractmethod def applyInverse(self, df:", "code prior to commit 7088cbbe # They lack the __isFitted attribute and we", "continue for c in matchingColumns: matchedRulesByColumn[c] = rule if not rule.skip: if rule.independentColumns", "of the transformer instance, which will only be applied if `transformer` is not", "of the columns of) a data frame. 
If multiple columns are transformed, they", "df: pd.DataFrame) -> pd.DataFrame: return df[self.vectorizedCondition(df[self.column])] def info(self): info = super().info() info[\"column\"] =", "= transformerFactory self.independentColumns = independentColumns def toRule(self, regex: Optional[str]): \"\"\" Convert the template", "ValueError(\"skip==True while transformer is not None\") self.skip = skip self.unsupported = unsupported self.transformer", "rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for c in matchingColumns: if c in", "SkLearnTransformerProtocol] = None, independentColumns=False): \"\"\" :param skip: flag indicating whether no transformation shall", "ufunc that applies to an entire Series \"\"\" super().__init__() self.column = column self.columnTransform", "in length) are to be transformed in the same way. If multiple columns", "categories for the column name. If None, the possible values will be inferred", "ignoreUnknown=False, arrayValuedResult=False): \"\"\" One hot encode categorical variables :param columns: list of names", "re.compile(regex) if regex is not None else None self.skip = skip self.unsupported =", "in-place \"\"\" super().__init__() self.requireAllHandled = requireAllHandled self.inplace = inplace self._userRules = rules self._defaultTransformerFactory", "7088cbbe # They lack the __isFitted attribute and we assume that each such", "= column self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] =", "class DFTNormalisation(DataFrameTransformer): \"\"\" Applies normalisation/scaling to a data frame by applying a set", "data frame by applying a vectorized condition on the selected column and retaining", "the specified columns (for case where sequence is specified in 'columns') or dictionary", "fitted and applied in the given order :param defaultTransformerFactory: a factory for the", "= self.inplace return info def findRule(self, colName: str) -> 
\"DFTNormalisation.Rule\": for rule in", "Adds a new column with counts of the values on a selected column", "is None: self._columnsToEncode = [] self._columnNameRegex = \"$\" elif type(columns) == str: self._columnNameRegex", "based on rules and does not need to be fitted to data\"\"\" def", "[] self._columnNameRegex = \"$\" elif type(columns) == str: self._columnNameRegex = columns self._columnsToEncode =", "transformerFactory self.arrayValued = arrayValued self.fit = fit self.independentColumns = independentColumns def __setstate__(self, state):", "not contain the same number of values: {lengths}\") values = np.stack(flatColArrays, axis=1) self.sklearnTransformer.fit(values)", "apply; if None, apply it to all columns :param inplace: whether to apply", "self.setName(f\"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}\") self.sklearnTransformer = sklearnTransformer self.columns = columns self.inplace = inplace self.arrayValued = arrayValued", "if inverse else lambda x: self.sklearnTransformer.transform(x) if not self.arrayValued: df[cols] = transform(df[cols].values) else:", "Convert the template to a rule for all columns matching the regex :param", "def __init__(self, columnsMap: Dict[str, str]): \"\"\" :param columnsMap: dictionary mapping old column names", "StandardScaler) that shall be used to create a transformer for all rules that", "column and retains only the rows for which the value is not in", "a separate column per original value \"\"\" super().__init__() self._paramInfo[\"columns\"] = columns self._paramInfo[\"inferCategories\"] =", "by applying a condition function to each row and retaining only the rows", "one-hot encoded columns each (or an array-valued column for the case where useArrayValues=True);", "be one-hot-encoded :param categories: numpy arrays containing the possible values of each of", "self._columnChangeTracker.trackChange(df) return df def info(self): return { \"name\": self.getName(), \"changeInColumnNames\": 
self._columnChangeTracker.columnChangeString() if self._columnChangeTracker", "takes a Numpy array and from which the returned value will be assigned", "= columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df", "= None): super().__init__() self.drop = drop self.keep = keep def _apply(self, df: pd.DataFrame)", "to new names \"\"\" super().__init__() self.columnsMap = columnsMap def _apply(self, df: pd.DataFrame) ->", "are always fitted and applied in the given order :param defaultTransformerFactory: a factory", "= df.loc[self.keep] if self.drop is not None: df = df.drop(self.drop) return df class", "self.inplace = inplace def _apply(self, df: pd.DataFrame) -> pd.DataFrame: if not self.inplace: df", "in state: state[\"arrayValuedResult\"] = False super().__setstate__(state) def _toStringAdditionalEntries(self) -> Dict[str, Any]: d =", "= condition def _apply(self, df: pd.DataFrame) -> pd.DataFrame: return df[df.apply(self.condition, axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer):", "regex: a regular expression defining the column(s) the rule applies to. 
If it", "self._rules: matchingColumns = rule.matchingColumns(df.columns) if len(matchingColumns) == 0: continue for c in matchingColumns:", "fit=True, independentColumns=False): \"\"\" :param regex: a regular expression defining the column(s) the rule", "the inverse transformation of this DFT \"\"\" return InverseDataFrameTransformer(self) class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base", "not None: d[\"regex\"] = f\"'{self.regex.pattern}'\" return d def setRegex(self, regex: str): self.regex =", "skip self.unsupported = unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns", "df[self.columnForEntryCount].value_counts() return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values}) def info(self): info = super().info() info[\"columnNameForResultingCounts\"] =", "resulting one-hot encoded columns for this feature will be all zeros. if False,", "be performed on all of the columns :param unsupported: flag indicating whether normalisation", "unsupported self.transformer = transformer self.transformerFactory = transformerFactory self.independentColumns = independentColumns def toRule(self, regex:", "applied in order to have the rule instantiated.\") return self.regex.fullmatch(column) is not None", "\"changeInColumnNames\": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None, \"isFitted\": self.isFitted(), } def", "of columns to which it applies (learning a single transformer based on the", "\"\"\" def __init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] = None):", "the matching column(s) is unsupported (shall trigger an exception if attempted) :param transformer:", "columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the name of the column to be", "def _fit(self, df: pd.DataFrame): if self._columnsToEncode is None: 
self._columnsToEncode = [c for c", "not None: df = df.loc[self.keep] if self.drop is not None: df = df.drop(self.drop)", "inplace=False): \"\"\" :param rules: the set of rules; rules are always fitted and", "df.columns if re.fullmatch(self._columnNameRegex, c) is not None] if len(self._columnsToEncode) == 0: log.warning(f\"{self} does", "self.columnTransform = columnTransform def _apply(self, df: pd.DataFrame) -> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return", "\"\"\" return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer, transformerFactory=self.transformerFactory, independentColumns=self.independentColumns) def toPlaceholderRule(self): return self.toRule(None) class", "separately trained transformation). \"\"\" def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False,", "df[cols] = transform(df[cols].values) else: if len(cols) == 1: c = cols[0] df[c] =", "str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the name of the column to", "assumed that all entries in such arrays are to be normalised in the", "def info(self): info = super().info() info[\"inplace\"] = self.inplace return info class DFTCountEntries(RuleBasedDataFrameTransformer): \"\"\"", "DFTModifyColumnVectorized(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform'. 
This transformer can", "transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x) if not", "-> pd.DataFrame: df[self.column] = self.columnTransform(df[self.column].values) return df class DFTOneHotEncoder(DataFrameTransformer): def __init__(self, columns: Optional[Union[str,", "is not None: raise ValueError(\"skip==True while transformer is not None\") self.skip = skip", "axis=1)] class DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform' \"\"\"", "'columnTransform' \"\"\" def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]): \"\"\" :param column: the", "nor `transformerInstance` are given, the containing instance's default factory will be used. See", "= rule if len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns}", "ToStringMixin log = logging.getLogger(__name__) class DataFrameTransformer(ABC, ToStringMixin): \"\"\" Base class for data frame", "self.columnForEntryCount return info class DFTAggregationOnColumn(RuleBasedDataFrameTransformer): def __init__(self, columnForAggregation: str, aggregation: Callable): super().__init__() self.columnForAggregation", "DFTModifyColumn(RuleBasedDataFrameTransformer): \"\"\" Modifies a column specified by 'column' using 'columnTransform' \"\"\" def __init__(self,", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() if self.keep is not None:", "matches multiple columns. 
\"\"\" if skip and (transformer is not None or transformerFactory", "def fit(self, df: pd.DataFrame): self._fit(df) self._isFitted = True def isFitted(self): return self._isFitted def", "len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported according", "pd.DataFrame) -> pd.DataFrame: return df[df[self.column].apply(self.condition)] class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer): \"\"\" Filters a data frame on", "= self.arrayValuedResult info.update(self._paramInfo) return info class DFTColumnFilter(RuleBasedDataFrameTransformer): \"\"\" A DataFrame transformer that filters", "collect specialised rule for application specialisedRule = copy.copy(rule) r = orRegexGroup(matchingColumns) try: specialisedRule.regex", "matching names of columns that are to be replaced by a list one-hot", "supported, i.e. the regex must match at most one column. :param fit: whether", "apply the transformation in-place :param arrayValued: whether to apply transformation not to scalar-valued", "= d def _toStringExcludePrivate(self) -> bool: return True def getName(self) -> str: \"\"\"", "if not self.inplace: df = df.copy() for columnName in self._columnsToEncode: encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]])", "to be replaced by a list one-hot encoded columns each (or an array-valued", "df: pd.DataFrame): pass @abstractmethod def _apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def apply(self,", "if len(matchingColumns) > 0: if rule.unsupported: raise Exception(f\"Normalisation of columns {matchingColumns} is unsupported", "str, condition: Callable[[Any], bool]): super().__init__() self.column = column self.condition = condition def _apply(self,", "def info(self): info = super().info() info[\"inplace\"] = self.inplace info[\"handleUnknown\"] = self.handleUnknown info[\"arrayValuedResult\"] =", "are transformed independently (i.e. 
each column uses a separately trained transformation). \"\"\" def", "= transformer.fitApply(df) self.dataFrameTransformers[-1].fit(df) def isFitted(self): return all([dft.isFitted() for dft in self.dataFrameTransformers]) def getNames(self)", "class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC): \"\"\"Base class for transformers whose logic is entirely based on", "a transformation is necessary (skip=False, unsupported=False). If None is given, either transformerFactory or", "the feature generator from which the rule originated was never applied in order", "= {} self._rules = [] for rule in self._userRules: matchingColumns = rule.matchingColumns(df.columns) for", "info[\"inplace\"] = self.inplace info[\"sklearnTransformerClass\"] = self.sklearnTransformer.__class__.__name__ return info class DFTSortColumns(RuleBasedDataFrameTransformer): \"\"\" Sorts a", "fitted using training data. \"\"\" def __init__(self): self._name = f\"{self.__class__.__name__}-{id(self)}\" self._isFitted = False", "possible categories for the column name. If None, the possible values will be", "_apply(self, df: pd.DataFrame) -> pd.DataFrame: pass def apply(self, df: pd.DataFrame) -> pd.DataFrame: self._columnChangeTracker", "pd.DataFrame) -> pd.DataFrame: return self.invertibleDFT.applyInverse(df) class DataFrameTransformerChain(DataFrameTransformer): \"\"\" Supports the application of a" ]
[ "may be a API key issue please check config.json', 'type': 'news'}], \"gather news", "last 7 days and a total of: 13 new \" + \"deaths in", "Press\", 'content': \"SAINSBURY'S is reviewing a major promotion after <NAME> urged it to", "def w_api_test(): \"\"\"Checks that valid information is being gathered from the weather API\"\"\"", "pip install uk-covid19', 'type': 'covid'}], \"gather covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks", "FAILED\" def hours_test(): \"\"\"Assess if hours are being correctly translated into minutes\"\"\" assert", "covid statistics test: FAILED\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics", "are not working\"\"\" import json from api_information import gather_news, news_anaysis, gather_weather, weather_analysis from", "gathered', 'content': 'sorry an unknow error occured', 'type': 'news'}], \"gather news test: FAILED\"", "are correctly converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test:", "\"<NAME> urges Sainsbury's to think again on Nectar deal | York Press -", "tensions are relatively high based on the current election, a pandemic, and a", "to make changes to avoid customers having to visit stores unnecessarily.\", 'type': 'news'},", "' + 'of 13.17°C that feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\"", "\"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks the covid API gives a valid response\"\"\"", "'type': 'weather'}, \"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks the covid API gives a", "json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather in Exeter'), 'content': ('The current", "'type': 'covid'}], \"gather covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data", "file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current covid statistic", "are 
correctly translated into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def", "during lockdown 2 - My London', 'content': 'A small number of essential retailers", "!= [{'title': 'News cannot currently be gathered - KeyError', 'content': 'This may be", "times where tensions are relatively high based on the current election, a pandemic,", "reviewing a major promotion after <NAME> urged it to make changes to avoid", "be a API key issue please check config.json', 'type': 'weather'}, \"gather weather test:", "api key check the config file\" assert gather_weather() != [{'title': 'weather cannot be", "weather test: FAILED, this may be an issue with the api key check", "minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess that all the conversions", "\"gather weather test: FAILED, this may be an issue with the api key", "UK covid 19 statistics cannot be gathered', 'content': 'Please make sure you have", "be gathered - KeyError', 'content': 'This may be a API key issue please", "bad luck that we have all seemed to adopt throughout 202…', 'type': 'news'},", "one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\"", "'content': 'sorry an unknow error occured', 'type': 'news'}], \"gather news test: FAILED\" def", "== 360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of minutes are correctly translated", "major promotion after <NAME> urged it to make changes to avoid customers having", "FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test() time_test()", "from the covid api is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as", "\"deaths in the last 7 days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\" def hours_test():", 
"test: FAILED\" def c_api_test(): \"\"\"Checks the covid API gives a valid response\"\"\" assert", "gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds", "is being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather =", "\"SAINSBURY'S is reviewing a major promotion after <NAME> urged it to make changes", "on, and what it teaches all of us - Teslarati\", 'content': 'Oftentimes, many", "'News cannot currently be gathered', 'content': 'sorry an unknow error occured', 'type': 'news'}],", "to look around and realize how fortunate we are to have what we", "a total of: 13 new \" + \"deaths in the last 7 days\",", "('The current weather is \"light rain\" with tempretures ' + 'of 13.17°C that", "issue please check config.json', 'type': 'news'}], \"gather news test: FAILED, this may be", "for Exeter', 'content': \"The number of new Covid-19 cases as of 2020-11-30, are:", "realize how fortunate we are to have what we have. 
In times where", "news api gives a valid reponse\"\"\" result = gather_news() assert gather_news() != [{'title':", "assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes online in lockdown using", "a year - Mirror Online\", 'content': 'A small number of essential retailers and", "an unknow error occured', 'type': 'weather'}], \"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks", "!= [{'title': 'The UK covid 19 statistics cannot be gathered', 'content': 'Check that", "working\"\"\" import json from api_information import gather_news, news_anaysis, gather_weather, weather_analysis from api_information import", "- KeyError', 'content': 'This may be a API key issue please check config.json',", "\"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information from the weather api", "the covid api is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open:", "rain\" with tempretures ' + 'of 13.17°C that feels like 11.81°C'), 'type': 'weather'},", "{ 'title': 'Current covid statistic for Exeter', 'content': \"The number of new Covid-19", "seemed to adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to think", "days and a total of: 13 new \" + \"deaths in the last", "weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information from the weather api is", "being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open)", "'', 'type': 'news'}, {'title': \"Tesla's $40M loan that kept the lights on, and", "open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title':", "outlets remain open at the airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test():", "have used pip install uk-covid19', 'type': 'covid'}], \"gather covid statistics test: FAILED\" 
def", "having to visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: Why Heathrow Airport", "statistic for Exeter', 'content': \"The number of new Covid-19 cases as of 2020-11-30,", "def n_anaysis_test(): \"\"\"Checks that the information from the covid api is correctly formatted", "'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that valid information is being", "FAILED\" def time_test(): \"\"\"Assess that all the conversions are correctly converted into one", "the config.json file are missing', 'type': 'covid'}], \"gather covid statistics test: FAILED\" assert", "no areas of the config.json file are missing', 'type': 'covid'}], \"gather covid statistics", "Online\", 'content': 'A small number of essential retailers and food and beverage outlets", "a string of bad luck that we have all seemed to adopt throughout", "in a year - Mirror Online\", 'content': 'A small number of essential retailers", "\"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if hours are being correctly translated into", "correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open) assert", "assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test()", "and food and beverage outlets remain open at the airport', 'type': 'news'}, {'title':", "to think again on Nectar deal | York Press - York Press\", 'content':", "valid reponse\"\"\" result = gather_news() assert gather_news() != [{'title': 'News cannot currently be", "'covid'}, \"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if hours are being correctly translated", "total of: 597 new cases in the last 7 days and a total", "with tempretures ' + 'of 13.17°C that feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis", "weather API\"\"\" assert gather_weather() != {'title': 'weather 
cannot be gathered - KeyError', 'content':", "that information from the weather api is being correctly formatted as notifications\"\"\" with", "def hours_test(): \"\"\"Assess if hours are being correctly translated into minutes\"\"\" assert hours_to_minutes(6)", "will stay open during lockdown 2 - My London', 'content': 'A small number", "and a string of bad luck that we have all seemed to adopt", "food and beverage outlets remain open at the airport', 'type': 'news'}], \"news_anaysis test:", "'weather'}, \"gather weather test: FAILED, this may be an issue with the api", "from the weather api is being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r')", "- Teslarati\", 'content': 'Oftentimes, many of us forget to look around and realize", "'of 13.17°C that feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\" def c_api_test():", "the last 7 days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if", "test: FAILED\" def time_test(): \"\"\"Assess that all the conversions are correctly converted into", "weather_analysis from api_information import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def", "correctly translated into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def time_test():", "minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of minutes", "unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: Why Heathrow Airport will stay open during", "unknow error occured', 'type': 'news'}], \"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that", "n_api_test(): \"\"\"Checks the news api gives a valid reponse\"\"\" result = gather_news() assert", "as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current 
covid", "check the config file\" assert gather_weather() != [{'title': 'weather cannot be gathered', 'content':", "!= [{'title': 'The UK covid 19 statistics cannot be gathered', 'content': 'Please make", "error occured', 'type': 'weather'}], \"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information", "what it teaches all of us - Teslarati\", 'content': 'Oftentimes, many of us", "Heathrow Airport will stay open during lockdown 2 - My London', 'content': 'A", "the airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that valid information", "are working as intended and includes some suggestions for if some modules are", "occured', 'type': 'weather'}], \"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information from", "test: FAILED\" def w_api_test(): \"\"\"Checks that valid information is being gathered from the", "install uk-covid19', 'type': 'covid'}], \"gather covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the", "== { 'title': 'Current covid statistic for Exeter', 'content': \"The number of new", "occured', 'type': 'news'}], \"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the information", "this may be an issue with the api key check the config file\"", "'Oftentimes, many of us forget to look around and realize how fortunate we", "for first time in a year - Mirror Online\", 'content': 'A small number", "check config.json', 'type': 'news'}], \"gather news test: FAILED, this may be an issue", "api key check the config file\" assert gather_news() != [{'title': 'News cannot currently", "[{'title': 'The UK covid 19 statistics cannot be gathered', 'content': 'Check that no", "13.17°C that feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks", "string of bad luck that we have all seemed to adopt throughout 202…',", "[{'title': 'News cannot currently be gathered - KeyError', 'content': 'This 
may be a", "news test: FAILED, this may be an issue with the api key check", "this hack – but be prepared to pay more - The Sun', 'content':", "assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current covid statistic for Exeter', 'content': \"The", "total of: 13 new \" + \"deaths in the last 7 days\", 'type':", "into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess that", "avoid customers having to visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: Why", "days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if hours are being", "where tensions are relatively high based on the current election, a pandemic, and", "'type': 'news'}], \"gather news test: FAILED, this may be an issue with the", "food and beverage outlets remain open at the airport', 'type': 'news'}, {'title': \"Shoppers", "remain open at the airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur throw", "result = gather_news() assert gather_news() != [{'title': 'News cannot currently be gathered -", "'content': 'This may be a API key issue please check config.json', 'type': 'weather'},", "[{'title': 'The UK covid 19 statistics cannot be gathered', 'content': 'Please make sure", "UK covid 19 statistics cannot be gathered', 'content': 'Check that no areas of", "gathered from the weather API\"\"\" assert gather_weather() != {'title': 'weather cannot be gathered", "My London', 'content': 'A small number of essential retailers and food and beverage", "'type': 'weather'}, \"gather weather test: FAILED, this may be an issue with the", "shoppers can buy clothes online in lockdown using this hack – but be", "number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test()", "with\" + \" at total of: 597 new cases in the last 7", "have what we have. 
In times where tensions are relatively high based on", "new cases in the last 7 days and a total of: 13 new", "after it returns to stores for first time in a year - Mirror", "remain open at the airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks", "a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter')", "all the conversions are correctly converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") ==", "FAILED, this may be an issue with the api key check the config", "valid response\"\"\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be", "that kept the lights on, and what it teaches all of us -", "'covid'}], \"gather covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data can", "statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data can be correctly formated", "config file\" assert gather_news() != [{'title': 'News cannot currently be gathered', 'content': 'sorry", "config file\" assert gather_weather() != [{'title': 'weather cannot be gathered', 'content': 'sorry an", "stay open during lockdown 2 - My London', 'content': 'A small number of", "a valid reponse\"\"\" result = gather_news() assert gather_news() != [{'title': 'News cannot currently", "are to have what we have. 
In times where tensions are relatively high", "'London Covid: Why Heathrow Airport will stay open during lockdown 2 - My", "with open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title':", "lockdown 2 - My London', 'content': 'A small number of essential retailers and", "the clock programme are working as intended and includes some suggestions for if", "api_information import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks", "= json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather in Exeter'), 'content': ('The", "airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur throw after it returns to", "'sorry an unknow error occured', 'type': 'weather'}], \"gather weather test: FAILED\" def w_anaysis_test():", "n_anaysis_test(): \"\"\"Checks that the information from the covid api is correctly formatted into", "news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes online in lockdown using this", "test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information from the weather api is being", "json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes online in lockdown", "of 2020-11-30, are: 69 with\" + \" at total of: 597 new cases", "think again on Nectar deal | York Press - York Press\", 'content': \"SAINSBURY'S", "c_anaysis_test(): \"\"\"Checks the covid data can be correctly formated as a notification\"\"\" with", "cannot currently be gathered', 'content': 'sorry an unknow error occured', 'type': 'news'}], \"gather", "high based on the current election, a pandemic, and a string of bad", "\"\"\"Checks that information from the weather api is being correctly formatted as notifications\"\"\"", "file_open: weather = json.load(file_open) assert weather_analysis(weather, 
'Exeter') == {'title': ('Current weather in Exeter'),", "json from api_information import gather_news, news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis", "valid information is being gathered from the weather API\"\"\" assert gather_weather() != {'title':", "you have used pip install uk-covid19', 'type': 'covid'}], \"gather covid statistics test: FAILED\"", "weather = json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather in Exeter'), 'content':", "import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api gives a valid", "\"Tesla's $40M loan that kept the lights on, and what it teaches all", "Exeter'), 'content': ('The current weather is \"light rain\" with tempretures ' + 'of", "London', 'content': 'A small number of essential retailers and food and beverage outlets", "airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that valid information is", "notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') ==", "\"The number of new Covid-19 cases as of 2020-11-30, are: 69 with\" +", "external files used in the clock programme are working as intended and includes", "\" at total of: 597 new cases in the last 7 days and", "adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to think again on", "'content': 'Please make sure you have used pip install uk-covid19', 'type': 'covid'}], \"gather", "of new Covid-19 cases as of 2020-11-30, are: 69 with\" + \" at", "test: FAILED, this may be an issue with the api key check the", "\"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the information from the covid", "notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open) assert 
news_anaysis(articles) == [{'title':", "be correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level =", "a pandemic, and a string of bad luck that we have all seemed", "'The UK covid 19 statistics cannot be gathered', 'content': 'Please make sure you", "json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current covid statistic for Exeter', 'content':", "be prepared to pay more - The Sun', 'content': '', 'type': 'news'}, {'title':", "York Press - York Press\", 'content': \"SAINSBURY'S is reviewing a major promotion after", "Sun', 'content': '', 'type': 'news'}, {'title': \"Tesla's $40M loan that kept the lights", "the api key check the config file\" assert gather_weather() != [{'title': 'weather cannot", "API key issue please check config.json', 'type': 'news'}], \"gather news test: FAILED, this", "response\"\"\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered',", "In times where tensions are relatively high based on the current election, a", "'r') as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current", "config.json file are missing', 'type': 'covid'}], \"gather covid statistics test: FAILED\" assert covid_statistics()", "can buy clothes online in lockdown using this hack – but be prepared", "missing', 'type': 'covid'}], \"gather covid statistics test: FAILED\" assert covid_statistics() != [{'title': 'The", "unknow error occured', 'type': 'weather'}], \"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that", "\"gather news test: FAILED, this may be an issue with the api key", "weather_analysis(weather, 'Exeter') == {'title': ('Current weather in Exeter'), 'content': ('The current weather is", "\"gather covid statistics test: FAILED\" assert covid_statistics() != [{'title': 'The UK covid 19", "11.81°C'), 'type': 'weather'}, \"weather_analysis test: 
FAILED\" def c_api_test(): \"\"\"Checks the covid API gives", "to adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to think again", "clothes online in lockdown using this hack – but be prepared to pay", "!= {'title': 'weather cannot be gathered - KeyError', 'content': 'This may be a", "!= [{'title': 'weather cannot be gathered', 'content': 'sorry an unknow error occured', 'type':", "it returns to stores for first time in a year - Mirror Online\",", "to have what we have. In times where tensions are relatively high based", "open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title': ('Current", "may be a API key issue please check config.json', 'type': 'weather'}, \"gather weather", "cases as of 2020-11-30, are: 69 with\" + \" at total of: 597", "the covid API gives a valid response\"\"\" assert covid_statistics() != [{'title': 'The UK", "key issue please check config.json', 'type': 'weather'}, \"gather weather test: FAILED, this may", "\"Shoppers stockpile Aldi's fur throw after it returns to stores for first time", "Sainsbury's to think again on Nectar deal | York Press - York Press\",", "FAILED\" def c_api_test(): \"\"\"Checks the covid API gives a valid response\"\"\" assert covid_statistics()", "c_api_test(): \"\"\"Checks the covid API gives a valid response\"\"\" assert covid_statistics() != [{'title':", "currently be gathered', 'content': 'sorry an unknow error occured', 'type': 'news'}], \"gather news", "of minutes are correctly translated into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test:", "and a total of: 13 new \" + \"deaths in the last 7", "- Mirror Online\", 'content': 'A small number of essential retailers and food and", "69 with\" + \" at total of: 597 new cases in the last", "fur throw after it returns to stores for first time in a year", "== 960, \"minutes_to_seconds test: FAILED\" def time_test(): 
\"\"\"Assess that all the conversions are", "'content': 'Oftentimes, many of us forget to look around and realize how fortunate", "19 statistics cannot be gathered', 'content': 'Check that no areas of the config.json", "\"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test()", "on the current election, a pandemic, and a string of bad luck that", "'content': ('The current weather is \"light rain\" with tempretures ' + 'of 13.17°C", "file\" assert gather_weather() != [{'title': 'weather cannot be gathered', 'content': 'sorry an unknow", "statistics cannot be gathered', 'content': 'Check that no areas of the config.json file", "that all the conversions are correctly converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\")", "FAILED\" def minutes_test(): \"\"\"Assess of minutes are correctly translated into seconds\"\"\" assert minutes_to_seconds(16)", "have all seemed to adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's", "[{'title': 'Primark shoppers can buy clothes online in lockdown using this hack –", "be gathered', 'content': 'Check that no areas of the config.json file are missing',", "not working\"\"\" import json from api_information import gather_news, news_anaysis, gather_weather, weather_analysis from api_information", "'content': '', 'type': 'news'}, {'title': \"Tesla's $40M loan that kept the lights on,", "as file_open: articles = json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy", "at the airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur throw after it", "'type': 'news'}, {'title': \"Tesla's $40M loan that kept the lights on, and what", "2 - My London', 'content': 'A small number of essential retailers and food", "'Primark shoppers can buy clothes online in lockdown using this hack – but", "first time in a year - Mirror Online\", 
'content': 'A small number of", "online in lockdown using this hack – but be prepared to pay more", "'covid'}], \"gather covid statistics test: FAILED\" assert covid_statistics() != [{'title': 'The UK covid", "hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api gives a valid reponse\"\"\" result =", "open during lockdown 2 - My London', 'content': 'A small number of essential", "\"\"\"checks that all external files used in the clock programme are working as", "be an issue with the api key check the config file\" assert gather_weather()", "many of us forget to look around and realize how fortunate we are", "def w_anaysis_test(): \"\"\"Checks that information from the weather api is being correctly formatted", "information from the weather api is being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json',", "== [{'title': 'Primark shoppers can buy clothes online in lockdown using this hack", "be gathered', 'content': 'Please make sure you have used pip install uk-covid19', 'type':", "make sure you have used pip install uk-covid19', 'type': 'covid'}], \"gather covid statistics", "programme are working as intended and includes some suggestions for if some modules", "working as intended and includes some suggestions for if some modules are not", "\"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that valid information is being gathered from", "= json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current covid statistic for Exeter',", "Teslarati\", 'content': 'Oftentimes, many of us forget to look around and realize how", "\"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess that all the conversions are correctly converted", "but be prepared to pay more - The Sun', 'content': '', 'type': 'news'},", "an issue with the api key check the config file\" assert gather_news() !=", "throw after it returns to stores for first time in a year -", "'news'}, {'title': \"<NAME> urges 
Sainsbury's to think again on Nectar deal | York", "that feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks the", "what we have. In times where tensions are relatively high based on the", "\"\"\"Checks the news api gives a valid reponse\"\"\" result = gather_news() assert gather_news()", "7 days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if hours are", "cannot currently be gathered - KeyError', 'content': 'This may be a API key", "'news'}], \"gather news test: FAILED, this may be an issue with the api", "sure you have used pip install uk-covid19', 'type': 'covid'}], \"gather covid statistics test:", "'content': 'A small number of essential retailers and food and beverage outlets remain", "and beverage outlets remain open at the airport', 'type': 'news'}], \"news_anaysis test: FAILED\"", "time_test(): \"\"\"Assess that all the conversions are correctly converted into one total number\"\"\"", "'content': 'This may be a API key issue please check config.json', 'type': 'news'}],", "reponse\"\"\" result = gather_news() assert gather_news() != [{'title': 'News cannot currently be gathered", "gives a valid response\"\"\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics", "of: 597 new cases in the last 7 days and a total of:", "a valid response\"\"\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot", "hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of minutes are correctly", "open at the airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that", "weather in Exeter'), 'content': ('The current weather is \"light rain\" with tempretures '", "def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test() time_test() if", "'type': 'covid'}], \"gather covid 
statistics test: FAILED\" assert covid_statistics() != [{'title': 'The UK", "is \"light rain\" with tempretures ' + 'of 13.17°C that feels like 11.81°C'),", "hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test()", "'news'}], \"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the information from the", "and realize how fortunate we are to have what we have. In times", "essential retailers and food and beverage outlets remain open at the airport', 'type':", "are being correctly translated into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\"", "cannot be gathered - KeyError', 'content': 'This may be a API key issue", "import gather_news, news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis from time_conversion import", "'A small number of essential retailers and food and beverage outlets remain open", "== {'title': ('Current weather in Exeter'), 'content': ('The current weather is \"light rain\"", "covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered', 'content': 'Check", "cannot be gathered', 'content': 'sorry an unknow error occured', 'type': 'weather'}], \"gather weather", "file_open: articles = json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes", "as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level,", "api gives a valid reponse\"\"\" result = gather_news() assert gather_news() != [{'title': 'News", "at total of: 597 new cases in the last 7 days and a", "gather_weather() != {'title': 'weather cannot be gathered - KeyError', 'content': 'This may be", "converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: 
FAILED\" def", "\"\"\"Checks that the information from the covid api is correctly formatted into notifications\"\"\"", "news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the information from the covid api", "that the information from the covid api is correctly formatted into notifications\"\"\" with", "if some modules are not working\"\"\" import json from api_information import gather_news, news_anaysis,", "Airport will stay open during lockdown 2 - My London', 'content': 'A small", "around and realize how fortunate we are to have what we have. In", "cases in the last 7 days and a total of: 13 new \"", "FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data can be correctly formated as a", "in the clock programme are working as intended and includes some suggestions for", "- My London', 'content': 'A small number of essential retailers and food and", "assert gather_news() != [{'title': 'News cannot currently be gathered - KeyError', 'content': 'This", "360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of minutes are correctly translated into", "covid api is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles", "Press - York Press\", 'content': \"SAINSBURY'S is reviewing a major promotion after <NAME>", "'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur throw after it returns to stores", "forget to look around and realize how fortunate we are to have what", "York Press\", 'content': \"SAINSBURY'S is reviewing a major promotion after <NAME> urged it", "\"\"\"Assess of minutes are correctly translated into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds", "– but be prepared to pay more - The Sun', 'content': '', 'type':", "'type': 'covid'}, \"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if hours are being correctly", "Covid-19 cases as of 2020-11-30, are: 69 with\" + \" at total of:", "minutes_test(): \"\"\"Assess of 
minutes are correctly translated into seconds\"\"\" assert minutes_to_seconds(16) == 960,", "def c_api_test(): \"\"\"Checks the covid API gives a valid response\"\"\" assert covid_statistics() !=", "and includes some suggestions for if some modules are not working\"\"\" import json", "def n_api_test(): \"\"\"Checks the news api gives a valid reponse\"\"\" result = gather_news()", "relatively high based on the current election, a pandemic, and a string of", "19 statistics cannot be gathered', 'content': 'Please make sure you have used pip", "into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open) assert news_anaysis(articles) ==", "13 new \" + \"deaths in the last 7 days\", 'type': 'covid'}, \"covid_data_analysis", "beverage outlets remain open at the airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's", "the lights on, and what it teaches all of us - Teslarati\", 'content':", "formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open) assert", "translated into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess", "teaches all of us - Teslarati\", 'content': 'Oftentimes, many of us forget to", "are relatively high based on the current election, a pandemic, and a string", "check the config file\" assert gather_news() != [{'title': 'News cannot currently be gathered',", "number of essential retailers and food and beverage outlets remain open at the", "more - The Sun', 'content': '', 'type': 'news'}, {'title': \"Tesla's $40M loan that", "have. 
In times where tensions are relatively high based on the current election,", "like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks the covid API", "with the api key check the config file\" assert gather_news() != [{'title': 'News", "issue please check config.json', 'type': 'weather'}, \"gather weather test: FAILED, this may be", "some suggestions for if some modules are not working\"\"\" import json from api_information", "areas of the config.json file are missing', 'type': 'covid'}], \"gather covid statistics test:", "time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api gives a", "def c_anaysis_test(): \"\"\"Checks the covid data can be correctly formated as a notification\"\"\"", "API\"\"\" assert gather_weather() != {'title': 'weather cannot be gathered - KeyError', 'content': 'This", "loan that kept the lights on, and what it teaches all of us", "total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test()", "assert gather_weather() != {'title': 'weather cannot be gathered - KeyError', 'content': 'This may", "after <NAME> urged it to make changes to avoid customers having to visit", "'news'}, {'title': 'London Covid: Why Heathrow Airport will stay open during lockdown 2", "'This may be a API key issue please check config.json', 'type': 'news'}], \"gather", "the information from the covid api is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json',", "'Exeter') == {'title': ('Current weather in Exeter'), 'content': ('The current weather is \"light", "'Check that no areas of the config.json file are missing', 'type': 'covid'}], \"gather", "covid data can be correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as", "covid 19 statistics cannot be gathered', 'content': 'Please make sure you have 
used", "feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks the covid", "'content': \"The number of new Covid-19 cases as of 2020-11-30, are: 69 with\"", "with open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == {", "the api key check the config file\" assert gather_news() != [{'title': 'News cannot", "!= [{'title': 'News cannot currently be gathered', 'content': 'sorry an unknow error occured',", "from the weather API\"\"\" assert gather_weather() != {'title': 'weather cannot be gathered -", "w_api_test(): \"\"\"Checks that valid information is being gathered from the weather API\"\"\" assert", "includes some suggestions for if some modules are not working\"\"\" import json from", "number of new Covid-19 cases as of 2020-11-30, are: 69 with\" + \"", "of the config.json file are missing', 'type': 'covid'}], \"gather covid statistics test: FAILED\"", "election, a pandemic, and a string of bad luck that we have all", "'Please make sure you have used pip install uk-covid19', 'type': 'covid'}], \"gather covid", "assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of minutes are", "correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level = json.load(file_open)", "formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open) assert weather_analysis(weather,", "data can be correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open:", "on Nectar deal | York Press - York Press\", 'content': \"SAINSBURY'S is reviewing", "can be correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r') as file_open: covid_level", "clock programme are working as intended and includes some suggestions for if 
some", "of us - Teslarati\", 'content': 'Oftentimes, many of us forget to look around", "customers having to visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: Why Heathrow", "covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api", "as file_open: weather = json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather in", "from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api gives", "translated into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess", "luck that we have all seemed to adopt throughout 202…', 'type': 'news'}, {'title':", "with open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark", "'content': \"SAINSBURY'S is reviewing a major promotion after <NAME> urged it to make", "promotion after <NAME> urged it to make changes to avoid customers having to", "pay more - The Sun', 'content': '', 'type': 'news'}, {'title': \"Tesla's $40M loan", "key check the config file\" assert gather_weather() != [{'title': 'weather cannot be gathered',", "gather_weather() != [{'title': 'weather cannot be gathered', 'content': 'sorry an unknow error occured',", "new \" + \"deaths in the last 7 days\", 'type': 'covid'}, \"covid_data_analysis test:", "n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test() time_test() if __name__ == '__main__': assert_external_files()", "'Exeter') == { 'title': 'Current covid statistic for Exeter', 'content': \"The number of", "assert gather_news() != [{'title': 'News cannot currently be gathered', 'content': 'sorry an unknow", "$40M loan that kept the lights on, and what it teaches all of", "the config file\" assert gather_news() != 
[{'title': 'News cannot currently be gathered', 'content':", "Nectar deal | York Press - York Press\", 'content': \"SAINSBURY'S is reviewing a", "being gathered from the weather API\"\"\" assert gather_weather() != {'title': 'weather cannot be", "[{'title': 'weather cannot be gathered', 'content': 'sorry an unknow error occured', 'type': 'weather'}],", "test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test()", "prepared to pay more - The Sun', 'content': '', 'type': 'news'}, {'title': \"Tesla's", "api_information import gather_news, news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis from time_conversion", "all of us - Teslarati\", 'content': 'Oftentimes, many of us forget to look", "covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news", "fortunate we are to have what we have. 
In times where tensions are", "that we have all seemed to adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME>", "gather_news, news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes,", "make changes to avoid customers having to visit stores unnecessarily.\", 'type': 'news'}, {'title':", "issue with the api key check the config file\" assert gather_weather() != [{'title':", "w_anaysis_test(): \"\"\"Checks that information from the weather api is being correctly formatted as", "assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered', 'content':", "\" + \"deaths in the last 7 days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\"", "an issue with the api key check the config file\" assert gather_weather() !=", "'type': 'news'}], \"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the information from", "\"\"\"Checks that valid information is being gathered from the weather API\"\"\" assert gather_weather()", "weather api is being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open:", "current weather is \"light rain\" with tempretures ' + 'of 13.17°C that feels", "that valid information is being gathered from the weather API\"\"\" assert gather_weather() !=", "uk-covid19', 'type': 'covid'}], \"gather covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid", "of bad luck that we have all seemed to adopt throughout 202…', 'type':", "small number of essential retailers and food and beverage outlets remain open at", "Mirror Online\", 'content': 'A small number of essential retailers and food and beverage", "Covid: Why Heathrow Airport will stay open during lockdown 2 - My London',", "covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current covid statistic for Exeter', 'content': \"The number", "urges Sainsbury's to think again 
on Nectar deal | York Press - York", "import json from api_information import gather_news, news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics,", "stockpile Aldi's fur throw after it returns to stores for first time in", "from api_information import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test():", "covid API gives a valid response\"\"\" assert covid_statistics() != [{'title': 'The UK covid", "Why Heathrow Airport will stay open during lockdown 2 - My London', 'content':", "retailers and food and beverage outlets remain open at the airport', 'type': 'news'}],", "== 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test()", "using this hack – but be prepared to pay more - The Sun',", "'weather cannot be gathered', 'content': 'sorry an unknow error occured', 'type': 'weather'}], \"gather", "{'title': 'weather cannot be gathered - KeyError', 'content': 'This may be a API", "current election, a pandemic, and a string of bad luck that we have", "be gathered', 'content': 'sorry an unknow error occured', 'type': 'weather'}], \"gather weather test:", "a API key issue please check config.json', 'type': 'news'}], \"gather news test: FAILED,", "the weather API\"\"\" assert gather_weather() != {'title': 'weather cannot be gathered - KeyError',", "be a API key issue please check config.json', 'type': 'news'}], \"gather news test:", "and what it teaches all of us - Teslarati\", 'content': 'Oftentimes, many of", "{'title': \"Shoppers stockpile Aldi's fur throw after it returns to stores for first", "it teaches all of us - Teslarati\", 'content': 'Oftentimes, many of us forget", "to visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: Why Heathrow Airport will", "visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: 
Why Heathrow Airport will stay", "cannot be gathered', 'content': 'Check that no areas of the config.json file are", "FAILED\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered',", "covid 19 statistics cannot be gathered', 'content': 'Check that no areas of the", "gives a valid reponse\"\"\" result = gather_news() assert gather_news() != [{'title': 'News cannot", "buy clothes online in lockdown using this hack – but be prepared to", "the current election, a pandemic, and a string of bad luck that we", "urged it to make changes to avoid customers having to visit stores unnecessarily.\",", "test: FAILED\" def minutes_test(): \"\"\"Assess of minutes are correctly translated into seconds\"\"\" assert", "'sorry an unknow error occured', 'type': 'news'}], \"gather news test: FAILED\" def n_anaysis_test():", "'type': 'news'}, {'title': 'London Covid: Why Heathrow Airport will stay open during lockdown", "news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds,", "intended and includes some suggestions for if some modules are not working\"\"\" import", "if hours are being correctly translated into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes", "+ \" at total of: 597 new cases in the last 7 days", "error occured', 'type': 'news'}], \"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the", "articles = json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes online", "again on Nectar deal | York Press - York Press\", 'content': \"SAINSBURY'S is", "used in the clock programme are working as intended and includes some suggestions", "{'title': ('Current weather in Exeter'), 'content': ('The current weather is \"light rain\" with", "in lockdown using this hack – but be prepared to pay more -", "is reviewing a major promotion after <NAME> urged 
it to make changes to", "in Exeter'), 'content': ('The current weather is \"light rain\" with tempretures ' +", "960, \"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess that all the conversions are correctly", "FAILED\" def w_anaysis_test(): \"\"\"Checks that information from the weather api is being correctly", "open at the airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur throw after", "hours_test(): \"\"\"Assess if hours are being correctly translated into minutes\"\"\" assert hours_to_minutes(6) ==", "minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api gives a valid reponse\"\"\" result", "def minutes_test(): \"\"\"Assess of minutes are correctly translated into seconds\"\"\" assert minutes_to_seconds(16) ==", "based on the current election, a pandemic, and a string of bad luck", "API key issue please check config.json', 'type': 'weather'}, \"gather weather test: FAILED, this", "year - Mirror Online\", 'content': 'A small number of essential retailers and food", "+ \"deaths in the last 7 days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\" def", "lockdown using this hack – but be prepared to pay more - The", "key issue please check config.json', 'type': 'news'}], \"gather news test: FAILED, this may", "information is being gathered from the weather API\"\"\" assert gather_weather() != {'title': 'weather", "2020-11-30, are: 69 with\" + \" at total of: 597 new cases in", "of: 13 new \" + \"deaths in the last 7 days\", 'type': 'covid'},", "api is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles =", "us forget to look around and realize how fortunate we are to have", "last 7 days\", 'type': 'covid'}, \"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess if hours", "{'title': \"Tesla's $40M loan that kept the lights on, and what it teaches", "('Current weather in Exeter'), 'content': ('The current weather is \"light 
rain\" with tempretures", "as intended and includes some suggestions for if some modules are not working\"\"\"", "\"\"\"Checks the covid data can be correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json',", "- York Press\", 'content': \"SAINSBURY'S is reviewing a major promotion after <NAME> urged", "'r') as file_open: weather = json.load(file_open) assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather", "of essential retailers and food and beverage outlets remain open at the airport',", "open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers", "into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of", "the conversions are correctly converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300,", "FAILED\" def w_api_test(): \"\"\"Checks that valid information is being gathered from the weather", "covid_level = json.load(file_open) assert covid_data_analysis(covid_level, 'Exeter') == { 'title': 'Current covid statistic for", "how fortunate we are to have what we have. 
In times where tensions", "is being gathered from the weather API\"\"\" assert gather_weather() != {'title': 'weather cannot", "that no areas of the config.json file are missing', 'type': 'covid'}], \"gather covid", "'type': 'weather'}], \"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information from the", "are missing', 'type': 'covid'}], \"gather covid statistics test: FAILED\" assert covid_statistics() != [{'title':", "'The UK covid 19 statistics cannot be gathered', 'content': 'Check that no areas", "beverage outlets remain open at the airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def", "stores for first time in a year - Mirror Online\", 'content': 'A small", "may be an issue with the api key check the config file\" assert", "key check the config file\" assert gather_news() != [{'title': 'News cannot currently be", "\"gather covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data can be", "[{'title': 'News cannot currently be gathered', 'content': 'sorry an unknow error occured', 'type':", "| York Press - York Press\", 'content': \"SAINSBURY'S is reviewing a major promotion", "<reponame>YannisLawrence1/Covid-Alarm<gh_stars>0 \"\"\"checks that all external files used in the clock programme are working", "with the api key check the config file\" assert gather_weather() != [{'title': 'weather", "and food and beverage outlets remain open at the airport', 'type': 'news'}], \"news_anaysis", "the airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur throw after it returns", "Aldi's fur throw after it returns to stores for first time in a", "issue with the api key check the config file\" assert gather_news() != [{'title':", "hack – but be prepared to pay more - The Sun', 'content': '',", "'News cannot currently be gathered - KeyError', 'content': 'This may be a API", "throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to think again on Nectar", "new Covid-19 cases as of 
2020-11-30, are: 69 with\" + \" at total", "covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered', 'content': 'Please", "\"\"\"Assess that all the conversions are correctly converted into one total number\"\"\" assert", "deal | York Press - York Press\", 'content': \"SAINSBURY'S is reviewing a major", "used pip install uk-covid19', 'type': 'covid'}], \"gather covid statistics test: FAILED\" def c_anaysis_test():", "files used in the clock programme are working as intended and includes some", "covid statistics test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data can be correctly", "being correctly translated into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def", "test: FAILED\" def hours_test(): \"\"\"Assess if hours are being correctly translated into minutes\"\"\"", "597 new cases in the last 7 days and a total of: 13", "all seemed to adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to", "= json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes online in", "we have all seemed to adopt throughout 202…', 'type': 'news'}, {'title': \"<NAME> urges", "test: FAILED\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be", "<NAME> urged it to make changes to avoid customers having to visit stores", "to pay more - The Sun', 'content': '', 'type': 'news'}, {'title': \"Tesla's $40M", "all external files used in the clock programme are working as intended and", "a major promotion after <NAME> urged it to make changes to avoid customers", "gathered', 'content': 'Check that no areas of the config.json file are missing', 'type':", "+ 'of 13.17°C that feels like 11.81°C'), 'type': 'weather'}, \"weather_analysis test: FAILED\" def", "into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files():", "in the last 7 days\", 'type': 'covid'}, 
\"covid_data_analysis test: FAILED\" def hours_test(): \"\"\"Assess", "'news'}, {'title': \"Tesla's $40M loan that kept the lights on, and what it", "kept the lights on, and what it teaches all of us - Teslarati\",", "'title': 'Current covid statistic for Exeter', 'content': \"The number of new Covid-19 cases", "gathered - KeyError', 'content': 'This may be a API key issue please check", "'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that valid information is being gathered", "n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test() time_test() if __name__ == '__main__':", "look around and realize how fortunate we are to have what we have.", "assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test() time_test() if __name__", "currently be gathered - KeyError', 'content': 'This may be a API key issue", "202…', 'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to think again on Nectar deal", "hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the news api gives a valid reponse\"\"\"", "def time_test(): \"\"\"Assess that all the conversions are correctly converted into one total", "assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess that all the", "we have. 
In times where tensions are relatively high based on the current", "information from the covid api is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r')", "correctly converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds test: FAILED\"", "KeyError', 'content': 'This may be a API key issue please check config.json', 'type':", "stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid: Why Heathrow Airport will stay open", "test: FAILED\" def n_anaysis_test(): \"\"\"Checks that the information from the covid api is", "the weather api is being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as", "an unknow error occured', 'type': 'news'}], \"gather news test: FAILED\" def n_anaysis_test(): \"\"\"Checks", "\"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test() hours_test() minutes_test() time_test() if __name__ ==", "the last 7 days and a total of: 13 new \" + \"deaths", "correctly translated into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test: FAILED\" def minutes_test():", "45300, \"hhmm_to_seconds test: FAILED\" def assert_external_files(): \"\"\"\"\"\" n_api_test() n_anaysis_test() w_api_test() w_anaysis_test() c_api_test() c_anaysis_test()", "FAILED\" def n_anaysis_test(): \"\"\"Checks that the information from the covid api is correctly", "'This may be a API key issue please check config.json', 'type': 'weather'}, \"gather", "as of 2020-11-30, are: 69 with\" + \" at total of: 597 new", "notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open) assert weather_analysis(weather, 'Exeter') ==", "that all external files used in the clock programme are working as intended", "\"hours_to_minutes test: FAILED\" def minutes_test(): \"\"\"Assess of minutes are correctly translated into seconds\"\"\"", "The Sun', 
'content': '', 'type': 'news'}, {'title': \"Tesla's $40M loan that kept the", "= gather_news() assert gather_news() != [{'title': 'News cannot currently be gathered - KeyError',", "cannot be gathered', 'content': 'Please make sure you have used pip install uk-covid19',", "'weather'}, \"weather_analysis test: FAILED\" def c_api_test(): \"\"\"Checks the covid API gives a valid", "as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open) assert weather_analysis(weather, 'Exeter')", "are: 69 with\" + \" at total of: 597 new cases in the", "be an issue with the api key check the config file\" assert gather_news()", "time in a year - Mirror Online\", 'content': 'A small number of essential", "the config file\" assert gather_weather() != [{'title': 'weather cannot be gathered', 'content': 'sorry", "suggestions for if some modules are not working\"\"\" import json from api_information import", "some modules are not working\"\"\" import json from api_information import gather_news, news_anaysis, gather_weather,", "at the airport', 'type': 'news'}], \"news_anaysis test: FAILED\" def w_api_test(): \"\"\"Checks that valid", "\"light rain\" with tempretures ' + 'of 13.17°C that feels like 11.81°C'), 'type':", "statistics test: FAILED\" assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot", "gathered', 'content': 'sorry an unknow error occured', 'type': 'weather'}], \"gather weather test: FAILED\"", "Exeter', 'content': \"The number of new Covid-19 cases as of 2020-11-30, are: 69", "correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather = json.load(file_open) assert", "{'title': 'London Covid: Why Heathrow Airport will stay open during lockdown 2 -", "'type': 'news'}, {'title': \"<NAME> urges Sainsbury's to think again on Nectar deal |", "gather_news() != [{'title': 'News cannot currently be gathered - KeyError', 'content': 'This may", 
"{'title': \"<NAME> urges Sainsbury's to think again on Nectar deal | York Press", "pandemic, and a string of bad luck that we have all seemed to", "config.json', 'type': 'weather'}, \"gather weather test: FAILED, this may be an issue with", "api is being correctly formatted as notifications\"\"\" with open('json_tests/weather-exeter.json', 'r') as file_open: weather", "'weather'}], \"gather weather test: FAILED\" def w_anaysis_test(): \"\"\"Checks that information from the weather", "outlets remain open at the airport', 'type': 'news'}, {'title': \"Shoppers stockpile Aldi's fur", "assert gather_weather() != [{'title': 'weather cannot be gathered', 'content': 'sorry an unknow error", "formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open) assert news_anaysis(articles)", "be gathered', 'content': 'sorry an unknow error occured', 'type': 'news'}], \"gather news test:", "7 days and a total of: 13 new \" + \"deaths in the", "seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\" def time_test(): \"\"\"Assess that all", "'Current covid statistic for Exeter', 'content': \"The number of new Covid-19 cases as", "config.json', 'type': 'news'}], \"gather news test: FAILED, this may be an issue with", "the covid data can be correctly formated as a notification\"\"\" with open('json_tests/covid-devon.json', 'r')", "returns to stores for first time in a year - Mirror Online\", 'content':", "for if some modules are not working\"\"\" import json from api_information import gather_news,", "'r') as file_open: articles = json.load(file_open) assert news_anaysis(articles) == [{'title': 'Primark shoppers can", "check config.json', 'type': 'weather'}, \"gather weather test: FAILED, this may be an issue", "to avoid customers having to visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London Covid:", "'news'}, {'title': \"Shoppers stockpile Aldi's fur throw after it returns to 
stores for", "changes to avoid customers having to visit stores unnecessarily.\", 'type': 'news'}, {'title': 'London", "'weather cannot be gathered - KeyError', 'content': 'This may be a API key", "\"\"\"Assess if hours are being correctly translated into minutes\"\"\" assert hours_to_minutes(6) == 360,", "gather_news() assert gather_news() != [{'title': 'News cannot currently be gathered - KeyError', 'content':", "covid statistic for Exeter', 'content': \"The number of new Covid-19 cases as of", "gather_news() != [{'title': 'News cannot currently be gathered', 'content': 'sorry an unknow error", "is correctly formatted into notifications\"\"\" with open('json_tests/gb-news.json', 'r') as file_open: articles = json.load(file_open)", "\"\"\"Checks the covid API gives a valid response\"\"\" assert covid_statistics() != [{'title': 'The", "us - Teslarati\", 'content': 'Oftentimes, many of us forget to look around and", "hours are being correctly translated into minutes\"\"\" assert hours_to_minutes(6) == 360, \"hours_to_minutes test:", "and beverage outlets remain open at the airport', 'type': 'news'}, {'title': \"Shoppers stockpile", "test: FAILED\" def c_anaysis_test(): \"\"\"Checks the covid data can be correctly formated as", "it to make changes to avoid customers having to visit stores unnecessarily.\", 'type':", "gathered', 'content': 'Please make sure you have used pip install uk-covid19', 'type': 'covid'}],", "modules are not working\"\"\" import json from api_information import gather_news, news_anaysis, gather_weather, weather_analysis", "import covid_statistics, covid_data_analysis from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds def n_api_test(): \"\"\"Checks the", "lights on, and what it teaches all of us - Teslarati\", 'content': 'Oftentimes,", "conversions are correctly converted into one total number\"\"\" assert hhmm_to_seconds(\"12:35\") == 45300, \"hhmm_to_seconds", "tempretures ' + 'of 13.17°C that feels like 
11.81°C'), 'type': 'weather'}, \"weather_analysis test:", "weather is \"light rain\" with tempretures ' + 'of 13.17°C that feels like", "of us forget to look around and realize how fortunate we are to", "from api_information import gather_news, news_anaysis, gather_weather, weather_analysis from api_information import covid_statistics, covid_data_analysis from", "please check config.json', 'type': 'news'}], \"gather news test: FAILED, this may be an", "- The Sun', 'content': '', 'type': 'news'}, {'title': \"Tesla's $40M loan that kept", "API gives a valid response\"\"\" assert covid_statistics() != [{'title': 'The UK covid 19", "file are missing', 'type': 'covid'}], \"gather covid statistics test: FAILED\" assert covid_statistics() !=", "file\" assert gather_news() != [{'title': 'News cannot currently be gathered', 'content': 'sorry an", "assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather in Exeter'), 'content': ('The current weather", "we are to have what we have. In times where tensions are relatively", "please check config.json', 'type': 'weather'}, \"gather weather test: FAILED, this may be an", "retailers and food and beverage outlets remain open at the airport', 'type': 'news'},", "'content': 'sorry an unknow error occured', 'type': 'weather'}], \"gather weather test: FAILED\" def", "minutes are correctly translated into seconds\"\"\" assert minutes_to_seconds(16) == 960, \"minutes_to_seconds test: FAILED\"", "statistics cannot be gathered', 'content': 'Please make sure you have used pip install", "a API key issue please check config.json', 'type': 'weather'}, \"gather weather test: FAILED,", "in the last 7 days and a total of: 13 new \" +", "the news api gives a valid reponse\"\"\" result = gather_news() assert gather_news() !=", "'content': 'Check that no areas of the config.json file are missing', 'type': 'covid'}],", "to stores for first time in a year - Mirror Online\", 'content': 'A" ]
[ "for k, v in meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes =", "The result is a scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset)", "imports are needed so that eval can work from tensorflow import TensorShape #", "example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f)", "won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))", "noqa: F401 if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path", "A tf.Operation that, when run, writes contents of dataset to a file. When", "of dataset to a file. When running in eager mode, calling this function", "returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump the structure so that", "as f: meta = json.load(f) for k, v in meta.items(): meta[k] = eval(v)", "value = value.numpy() # BytesList won't unpack a string from an EagerTensor. return", "= eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i", "= [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)] args = [tf.reshape(v, s)", "output_types)] args = [tf.reshape(v, s) for v, s in zip(args, output_shapes)] return tf.nest.pack_sequence_as(meta[\"output_types\"],", "a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset,", "EagerTensor. 
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset", ") args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v, t in zip(args,", "feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the input tf.Example proto using", "= tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for", "function. num_parallel_reads: int A `tf.int64` scalar representing the number of files to read", "def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a TFRecord file. Parameters ---------- dataset", "it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\")", "repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f: json.dump(meta, f) # create", "A `tf.int64` scalar representing the number of files to read in parallel. 
Defaults", "for i, f in enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto =", "__future__ import print_function import json import tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def", "= tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for", "def serialize_example_pyfunction(*args): feature = {} for i, f in enumerate(args): key = f\"feature{i}\"", "k, v in meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"])", "json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output def normalize_tfrecords_path(output): if not", "returns a dataset. The TFRecord file must have been created using the :any:`dataset_to_tfrecord`", "want to write into a TFRecord file. output : str Path to the", "tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) # The result is a scalar dataset", "= tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset.", "will write the file. Otherwise, you have to call session.run() on the returned", "tf.string) return tf.reshape(tf_string, ()) # The result is a scalar dataset = dataset.map(tf_serialize_example)", "# create a custom map function that serializes the dataset def serialize_example_pyfunction(*args): feature", "when you want to convert the TFRecord file back into a dataset. Returns", "file. Besides this file, a .json file is also created. This json file", "result is a scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def", "contains the data from the TFRecord file. 
\"\"\" # these imports are needed", "[tf.io.serialize_tensor(f) for f in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ())", "__future__ import absolute_import from __future__ import division from __future__ import print_function import json", "str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord] json_output =", "json.load(f) for k, v in meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes", "Pass a list if you are sure several tfrecords need the same map", "TFRecord file. Pass a list if you are sure several tfrecords need the", "return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't", "= json.load(f) for k, v in meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"])", "operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump the structure so that we", "must have been created using the :any:`dataset_to_tfrecord` function. Parameters ---------- tfrecord : str", "json.dump(meta, f) # create a custom map function that serializes the dataset def", "tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f", "for f in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) #", "dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a TFRecord file. Parameters ---------- dataset :", "have been created using the :any:`dataset_to_tfrecord` function. 
Parameters ---------- tfrecord : str or", "= dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and", "absolute_import from __future__ import division from __future__ import print_function import json import tensorflow", "output, json_output = tfrecord_name_and_json_name(output) # dump the structure so that we can read", "= output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT):", "output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output def", "to call session.run() on the returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) #", "not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value", "dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns", "want to convert the TFRecord file back into a dataset. Returns ------- ``tf.Operation``", "tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta =", "_parse_function(example_proto): # Parse the input tf.Example proto using the dictionary above. args =", "scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads", "if you are sure several tfrecords need the same map function. 
num_parallel_reads: int", "a custom map function that serializes the dataset def serialize_example_pyfunction(*args): feature = {}", "with open(json_output, \"w\") as f: json.dump(meta, f) # create a custom map function", "tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset. The", ": str or list Path to the TFRecord file. Pass a list if", "return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a TFRecord file. Parameters", "for TFRecords \"\"\" from __future__ import absolute_import from __future__ import division from __future__", "and returns a dataset. The TFRecord file must have been created using the", "that, when run, writes contents of dataset to a file. When running in", "a TFRecord file. output : str Path to the TFRecord file. Besides this", "def tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in args] tf_string", "for path in tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0] for path in", "eval can work from tensorflow import TensorShape # noqa: F401 if isinstance(tfrecord, str):", "range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the", "feature_description = {} for i in range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([],", "from __future__ import print_function import json import tensorflow as tf TFRECORDS_EXT = \".tfrecords\"", "you have to call session.run() on the returned operation. \"\"\" output, json_output =", "Path to the TFRecord file. 
Pass a list if you are sure several", "dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords", "the structure so that we can read it back meta = { \"output_types\":", "writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset. The TFRecord file", "needed so that eval can work from tensorflow import TensorShape # noqa: F401", "to the TFRecord file. Besides this file, a .json file is also created.", "reading files sequentially. Returns ------- ``tf.data.Dataset`` A dataset that contains the data from", "[tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord] json_output = tfrecord[0][1] tfrecord =", "file. \"\"\" # these imports are needed so that eval can work from", "work from tensorflow import TensorShape # noqa: F401 if isinstance(tfrecord, str): tfrecord =", "a file. When running in eager mode, calling this function will write the", "Path to the TFRecord file. Besides this file, a .json file is also", "as f: json.dump(meta, f) # create a custom map function that serializes the", "dataset def serialize_example_pyfunction(*args): feature = {} for i, f in enumerate(args): key =", "the same map function. num_parallel_reads: int A `tf.int64` scalar representing the number of", "isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord] json_output", "several tfrecords need the same map function. 
num_parallel_reads: int A `tf.int64` scalar representing", "from __future__ import division from __future__ import print_function import json import tensorflow as", "if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord]", "dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset. The TFRecord file must have", "in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) # The result", "map function. num_parallel_reads: int A `tf.int64` scalar representing the number of files to", "A dataset that contains the data from the TFRecord file. \"\"\" # these", "# Parse the input tf.Example proto using the dictionary above. args = tf.io.parse_single_example(", "unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def", "tf.data.Dataset into a TFRecord file. Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that", "dataset : ``tf.data.Dataset`` The tf.data.Dataset that you want to write into a TFRecord", "[tfrecord_name_and_json_name(path) for path in tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0] for path", "When running in eager mode, calling this function will write the file. Otherwise,", "The TFRecord file must have been created using the :any:`dataset_to_tfrecord` function. Parameters ----------", "the file. Otherwise, you have to call session.run() on the returned operation. \"\"\"", "enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def", "dataset. Returns ------- ``tf.Operation`` A tf.Operation that, when run, writes contents of dataset", "is also created. 
This json file is needed when you want to convert", "from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes", "= [path[0] for path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with", "files to read in parallel. Defaults to reading files sequentially. Returns ------- ``tf.data.Dataset``", ".json file is also created. This json file is needed when you want", "output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in range(len(output_types)): key = f\"feature{i}\"", "files sequentially. Returns ------- ``tf.data.Dataset`` A dataset that contains the data from the", "tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0] for path in tfrecord] raw_dataset =", "convert the TFRecord file back into a dataset. Returns ------- ``tf.Operation`` A tf.Operation", "value.numpy() # BytesList won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def", "file is needed when you want to convert the TFRecord file back into", "= tfrecord_name_and_json_name(output) # dump the structure so that we can read it back", "created using the :any:`dataset_to_tfrecord` function. 
Parameters ---------- tfrecord : str or list Path", "int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a TFRecord file.", "\"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f: json.dump(meta, f) #", "= tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)] args", "i, f in enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature))", "{ \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f: json.dump(meta, f)", "normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value,", "output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in range(len(output_types)):", "__future__ import division from __future__ import print_function import json import tensorflow as tf", "= [tf.reshape(v, s) for v, s in zip(args, output_shapes)] return tf.nest.pack_sequence_as(meta[\"output_types\"], args) return", "tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)] args =", "tf.Example proto using the dictionary above. args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args", "tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a TFRecord file. 
Parameters ----------", "is needed when you want to convert the TFRecord file back into a", "string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output):", "type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from an EagerTensor.", "file. output : str Path to the TFRecord file. Besides this file, a", "session.run() on the returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump the", "writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a", "to write into a TFRecord file. output : str Path to the TFRecord", "an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a", "= [tf.io.serialize_tensor(f) for f in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string,", "are sure several tfrecords need the same map function. num_parallel_reads: int A `tf.int64`", "output += TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy()", "contents of dataset to a file. 
When running in eager mode, calling this", "import json import tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output =", "raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta = json.load(f)", "serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v, t", "json_output = tfrecord[0][1] tfrecord = [path[0] for path in tfrecord] raw_dataset = tf.data.TFRecordDataset(", "for v, t in zip(args, output_types)] args = [tf.reshape(v, s) for v, s", "back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as", "mode, calling this function will write the file. Otherwise, you have to call", "create a custom map function that serializes the dataset def serialize_example_pyfunction(*args): feature =", "TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] +", "TFRecords and returns a dataset. The TFRecord file must have been created using", "tf.string) def _parse_function(example_proto): # Parse the input tf.Example proto using the dictionary above.", "in eager mode, calling this function will write the file. 
Otherwise, you have", "\".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return", "meta = json.load(f) for k, v in meta.items(): meta[k] = eval(v) output_types =", "tfrecord[0][1] tfrecord = [path[0] for path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads", "= tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in range(len(output_types)): key", "if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))):", ": ``tf.data.Dataset`` The tf.data.Dataset that you want to write into a TFRecord file.", "file, a .json file is also created. This json file is needed when", "a .json file is also created. This json file is needed when you", "---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that you want to write into a", "num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset. The TFRecord file must have been", "F401 if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in", "output : str Path to the TFRecord file. Besides this file, a .json", "so that eval can work from tensorflow import TensorShape # noqa: F401 if", "tf.Operation that, when run, writes contents of dataset to a file. When running", "tf.reshape(tf_string, ()) # The result is a scalar dataset = dataset.map(tf_serialize_example) writer =", "the TFRecord file. Besides this file, a .json file is also created. 
This", "read it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output,", "tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) # The result is a", "run, writes contents of dataset to a file. When running in eager mode,", "def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a TFRecord", "\"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f: json.dump(meta, f) # create a", "args = [tf.reshape(v, s) for v, s in zip(args, output_shapes)] return tf.nest.pack_sequence_as(meta[\"output_types\"], args)", "tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta = json.load(f) for k, v", "-len(TFRECORDS_EXT)] + \".json\" return output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output +=", "in parallel. Defaults to reading files sequentially. Returns ------- ``tf.data.Dataset`` A dataset that", "in range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse", "args = [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)] args = [tf.reshape(v,", "in enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString()", "same map function. 
num_parallel_reads: int A `tf.int64` scalar representing the number of files", "= \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\"", "Parameters ---------- tfrecord : str or list Path to the TFRecord file. Pass", "that you want to write into a TFRecord file. output : str Path", "can work from tensorflow import TensorShape # noqa: F401 if isinstance(tfrecord, str): tfrecord", "this file, a .json file is also created. This json file is needed", "key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args):", "+ \".json\" return output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT", "the TFRecord file. Pass a list if you are sure several tfrecords need", "the :any:`dataset_to_tfrecord` function. 
Parameters ---------- tfrecord : str or list Path to the", "output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value =", "= f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args", "= tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) # The result is a scalar", "from tensorflow import TensorShape # noqa: F401 if isinstance(tfrecord, str): tfrecord = [tfrecord]", "tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into a", "tensorflow import TensorShape # noqa: F401 if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord", "a dataset. Returns ------- ``tf.Operation`` A tf.Operation that, when run, writes contents of", "\"\"\"Reads TFRecords and returns a dataset. The TFRecord file must have been created", "args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in args] tf_string = tf.py_function(serialize_example_pyfunction,", "function will write the file. Otherwise, you have to call session.run() on the", "f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the input tf.Example proto", "args, tf.string) return tf.reshape(tf_string, ()) # The result is a scalar dataset =", "+= TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy() #", "} with open(json_output, \"w\") as f: json.dump(meta, f) # create a custom map", "a tf.data.Dataset into a TFRecord file. 
Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset", "you want to write into a TFRecord file. output : str Path to", "TFRecord file. \"\"\" # these imports are needed so that eval can work", "in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta", "tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta = json.load(f) for k,", "args = [tf.io.serialize_tensor(f) for f in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return", "Besides this file, a .json file is also created. This json file is", "list if you are sure several tfrecords need the same map function. num_parallel_reads:", "tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0]", "tfrecord = [path[0] for path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads )", "= tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in args] tf_string = tf.py_function(serialize_example_pyfunction, args,", "open(json_output) as f: meta = json.load(f) for k, v in meta.items(): meta[k] =", "the input tf.Example proto using the dictionary above. args = tf.io.parse_single_example( serialized=example_proto, features=feature_description", ":any:`dataset_to_tfrecord` function. Parameters ---------- tfrecord : str or list Path to the TFRecord", "file. Otherwise, you have to call session.run() on the returned operation. \"\"\" output,", "# dump the structure so that we can read it back meta =", "return output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output", "Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that you want to write into", "when run, writes contents of dataset to a file. 
When running in eager", "def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def bytes_feature(value): if", "def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset. The TFRecord file must", "TFRecord file must have been created using the :any:`dataset_to_tfrecord` function. Parameters ---------- tfrecord", "def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return output,", "------- ``tf.Operation`` A tf.Operation that, when run, writes contents of dataset to a", "file. Pass a list if you are sure several tfrecords need the same", "sure several tfrecords need the same map function. num_parallel_reads: int A `tf.int64` scalar", "proto using the dictionary above. args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args =", "file must have been created using the :any:`dataset_to_tfrecord` function. Parameters ---------- tfrecord :", "custom map function that serializes the dataset def serialize_example_pyfunction(*args): feature = {} for", "in tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0] for path in tfrecord] raw_dataset", "args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t)", "\"\"\" # these imports are needed so that eval can work from tensorflow", "dump the structure so that we can read it back meta = {", "f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args =", "v, t in zip(args, output_types)] args = [tf.reshape(v, s) for v, s in", "the data from the TFRecord file. \"\"\" # these imports are needed so", "data from the TFRecord file. 
\"\"\" # these imports are needed so that", "= [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord] json_output = tfrecord[0][1] tfrecord", "you want to convert the TFRecord file back into a dataset. Returns -------", "is a scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord,", "``tf.data.Dataset`` The tf.data.Dataset that you want to write into a TFRecord file. output", "f in enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return", "with open(json_output) as f: meta = json.load(f) for k, v in meta.items(): meta[k]", "using the :any:`dataset_to_tfrecord` function. Parameters ---------- tfrecord : str or list Path to", "list Path to the TFRecord file. Pass a list if you are sure", "[tf.reshape(v, s) for v, s in zip(args, output_shapes)] return tf.nest.pack_sequence_as(meta[\"output_types\"], args) return raw_dataset.map(_parse_function)", "output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack", "if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from", "path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f:", "representing the number of files to read in parallel. Defaults to reading files", "dataset to a file. When running in eager mode, calling this function will", "TFRecord file. 
Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that you want to", "tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string)", "= f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the input tf.Example", "def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a", "= normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output def normalize_tfrecords_path(output):", ": str Path to the TFRecord file. Besides this file, a .json file", "into a dataset. Returns ------- ``tf.Operation`` A tf.Operation that, when run, writes contents", "``tf.data.Dataset`` A dataset that contains the data from the TFRecord file. \"\"\" #", "= {} for i, f in enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f)", "tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in range(len(output_types)): key =", "TFRecord file. Besides this file, a .json file is also created. This json", "created. This json file is needed when you want to convert the TFRecord", "scalar representing the number of files to read in parallel. Defaults to reading", "back into a dataset. Returns ------- ``tf.Operation`` A tf.Operation that, when run, writes", "{} for i in range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def", "import print_function import json import tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output):", "def _parse_function(example_proto): # Parse the input tf.Example proto using the dictionary above. 
args", "tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output", "a scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):", "return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in", "that eval can work from tensorflow import TensorShape # noqa: F401 if isinstance(tfrecord,", "f) # create a custom map function that serializes the dataset def serialize_example_pyfunction(*args):", "feature = {} for i, f in enumerate(args): key = f\"feature{i}\" feature[key] =", "[path[0] for path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output)", "return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def dataset_to_tfrecord(dataset, output): \"\"\"Writes a tf.data.Dataset into", "you are sure several tfrecords need the same map function. num_parallel_reads: int A", "also created. This json file is needed when you want to convert the", "file back into a dataset. Returns ------- ``tf.Operation`` A tf.Operation that, when run,", "writes contents of dataset to a file. When running in eager mode, calling", "write into a TFRecord file. output : str Path to the TFRecord file.", "to reading files sequentially. Returns ------- ``tf.data.Dataset`` A dataset that contains the data", "{} for i, f in enumerate(args): key = f\"feature{i}\" feature[key] = bytes_feature(f) example_proto", "above. 
args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v,", "f: json.dump(meta, f) # create a custom map function that serializes the dataset", "= tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the input tf.Example proto using the", "\".json\" return output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return", "eager mode, calling this function will write the file. Otherwise, you have to", "import absolute_import from __future__ import division from __future__ import print_function import json import", "# BytesList won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value):", "``tf.Operation`` A tf.Operation that, when run, writes contents of dataset to a file.", "map function that serializes the dataset def serialize_example_pyfunction(*args): feature = {} for i,", "running in eager mode, calling this function will write the file. Otherwise, you", "for path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as", "tfrecord : str or list Path to the TFRecord file. Pass a list", "t in zip(args, output_types)] args = [tf.reshape(v, s) for v, s in zip(args,", "to read in parallel. Defaults to reading files sequentially. Returns ------- ``tf.data.Dataset`` A", "input tf.Example proto using the dictionary above. args = tf.io.parse_single_example( serialized=example_proto, features=feature_description )", "import TensorShape # noqa: F401 if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord =", "the dictionary above. 
args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args", "import division from __future__ import print_function import json import tensorflow as tf TFRECORDS_EXT", "the number of files to read in parallel. Defaults to reading files sequentially.", "read in parallel. Defaults to reading files sequentially. Returns ------- ``tf.data.Dataset`` A dataset", "tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v,", "tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the input tf.Example proto using the dictionary", "dataset. The TFRecord file must have been created using the :any:`dataset_to_tfrecord` function. Parameters", "Otherwise, you have to call session.run() on the returned operation. \"\"\" output, json_output", "str Path to the TFRecord file. Besides this file, a .json file is", "on the returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump the structure", "meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f:", "the returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump the structure so", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord] json_output = tfrecord[0][1]", "file. 
Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that you want to write", "TFRECORDS_EXT return output def bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList", "output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output", "meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {}", "bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args =", "TensorShape # noqa: F401 if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path)", "= tfrecord[0][1] tfrecord = [path[0] for path in tfrecord] raw_dataset = tf.data.TFRecordDataset( tfrecord,", "num_parallel_reads: int A `tf.int64` scalar representing the number of files to read in", "can read it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with", "f: meta = json.load(f) for k, v in meta.items(): meta[k] = eval(v) output_types", "open(json_output, \"w\") as f: json.dump(meta, f) # create a custom map function that", "or list Path to the TFRecord file. Pass a list if you are", "using the dictionary above. args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args)", "tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in range(len(output_types)): key = f\"feature{i}\" feature_description[key] =", "isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from an", "BytesList won't unpack a string from an EagerTensor. 
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def int64_feature(value): return", "tfrecord_name_and_json_name(output) # dump the structure so that we can read it back meta", "tfrecords need the same map function. num_parallel_reads: int A `tf.int64` scalar representing the", "into a TFRecord file. Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that you", "into a TFRecord file. output : str Path to the TFRecord file. Besides", "= tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in range(len(output_types)): key = f\"feature{i}\" feature_description[key]", "return tf.reshape(tf_string, ()) # The result is a scalar dataset = dataset.map(tf_serialize_example) writer", "to a file. When running in eager mode, calling this function will write", "()) # The result is a scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output)", "# noqa: F401 if isinstance(tfrecord, str): tfrecord = [tfrecord] tfrecord = [tfrecord_name_and_json_name(path) for", "that we can read it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)),", "= [tfrecord_name_and_json_name(path) for path in tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0] for", "in zip(args, output_types)] args = [tf.reshape(v, s) for v, s in zip(args, output_shapes)]", "path in tfrecord] json_output = tfrecord[0][1] tfrecord = [path[0] for path in tfrecord]", "to convert the TFRecord file back into a dataset. Returns ------- ``tf.Operation`` A", "= {} for i in range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string)", "TFRecord file back into a dataset. 
Returns ------- ``tf.Operation`` A tf.Operation that, when", "num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta = json.load(f) for k, v in", "tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)]", "args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) # The result is", "call session.run() on the returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump", "= bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args", "file. When running in eager mode, calling this function will write the file.", "a dataset. The TFRecord file must have been created using the :any:`dataset_to_tfrecord` function.", ") with open(json_output) as f: meta = json.load(f) for k, v in meta.items():", "# these imports are needed so that eval can work from tensorflow import", "zip(args, output_types)] args = [tf.reshape(v, s) for v, s in zip(args, output_shapes)] return", "division from __future__ import print_function import json import tensorflow as tf TFRECORDS_EXT =", "TFRecords \"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "a TFRecord file. Parameters ---------- dataset : ``tf.data.Dataset`` The tf.data.Dataset that you want", "that contains the data from the TFRecord file. 
\"\"\" # these imports are", "output, json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def", "key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): # Parse the input", "tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output =", "repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f: json.dump(meta, f) # create a custom", "function. Parameters ---------- tfrecord : str or list Path to the TFRecord file.", "print_function import json import tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output", "t) for v, t in zip(args, output_types)] args = [tf.reshape(v, s) for v,", "\"\"\" output, json_output = tfrecord_name_and_json_name(output) # dump the structure so that we can", "json_output = tfrecord_name_and_json_name(output) # dump the structure so that we can read it", "to the TFRecord file. Pass a list if you are sure several tfrecords", "Returns ------- ``tf.data.Dataset`` A dataset that contains the data from the TFRecord file.", "needed when you want to convert the TFRecord file back into a dataset.", "\"w\") as f: json.dump(meta, f) # create a custom map function that serializes", "the TFRecord file. \"\"\" # these imports are needed so that eval can", "TFRecord file. output : str Path to the TFRecord file. Besides this file,", "This json file is needed when you want to convert the TFRecord file", "of files to read in parallel. Defaults to reading files sequentially. Returns -------", "i in range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto): #", "this function will write the file. 
Otherwise, you have to call session.run() on", "------- ``tf.data.Dataset`` A dataset that contains the data from the TFRecord file. \"\"\"", "\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "are needed so that eval can work from tensorflow import TensorShape # noqa:", "as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output = output[:", "write the file. Otherwise, you have to call session.run() on the returned operation.", "dictionary above. args = tf.io.parse_single_example( serialized=example_proto, features=feature_description ) args = tf.nest.flatten(args) args =", "function that serializes the dataset def serialize_example_pyfunction(*args): feature = {} for i, f", "feature[key] = bytes_feature(f) example_proto = tf.train.Example(features=tf.train.Features(feature=feature)) return example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args)", "return writer.write(dataset) def dataset_from_tfrecord(tfrecord, num_parallel_reads=None): \"\"\"Reads TFRecords and returns a dataset. The TFRecord", "that serializes the dataset def serialize_example_pyfunction(*args): feature = {} for i, f in", "have to call session.run() on the returned operation. \"\"\" output, json_output = tfrecord_name_and_json_name(output)", "we can read it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), }", "f in args] tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string) return tf.reshape(tf_string, ()) # The", "in meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description =", "output): \"\"\"Writes a tf.data.Dataset into a TFRecord file. 
Parameters ---------- dataset : ``tf.data.Dataset``", "`tf.int64` scalar representing the number of files to read in parallel. Defaults to", "[tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)] args = [tf.reshape(v, s) for", "a list if you are sure several tfrecords need the same map function.", "json file is needed when you want to convert the TFRecord file back", "number of files to read in parallel. Defaults to reading files sequentially. Returns", "= value.numpy() # BytesList won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "from the TFRecord file. \"\"\" # these imports are needed so that eval", "tf.data.Dataset that you want to write into a TFRecord file. output : str", "so that we can read it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\":", "= tf.data.TFRecordDataset( tfrecord, num_parallel_reads=num_parallel_reads ) with open(json_output) as f: meta = json.load(f) for", "eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for i in", "The tf.data.Dataset that you want to write into a TFRecord file. output :", "json_output def normalize_tfrecords_path(output): if not output.endswith(TFRECORDS_EXT): output += TFRECORDS_EXT return output def bytes_feature(value):", "been created using the :any:`dataset_to_tfrecord` function. Parameters ---------- tfrecord : str or list", "serializes the dataset def serialize_example_pyfunction(*args): feature = {} for i, f in enumerate(args):", "tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in args] tf_string =", "parallel. Defaults to reading files sequentially. Returns ------- ``tf.data.Dataset`` A dataset that contains", "Parse the input tf.Example proto using the dictionary above. 
args = tf.io.parse_single_example( serialized=example_proto,", "normalize_tfrecords_path(output) json_output = output[: -len(TFRECORDS_EXT)] + \".json\" return output, json_output def normalize_tfrecords_path(output): if", "example_proto.SerializeToString() def tf_serialize_example(*args): args = tf.nest.flatten(args) args = [tf.io.serialize_tensor(f) for f in args]", "the TFRecord file back into a dataset. Returns ------- ``tf.Operation`` A tf.Operation that,", "# The result is a scalar dataset = dataset.map(tf_serialize_example) writer = tf.data.experimental.TFRecordWriter(output) return", "v in meta.items(): meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description", "meta[k] = eval(v) output_types = tf.nest.flatten(meta[\"output_types\"]) output_shapes = tf.nest.flatten(meta[\"output_shapes\"]) feature_description = {} for", "json import tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output)", "= { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)), \"output_shapes\": repr(tf.compat.v1.data.get_output_shapes(dataset)), } with open(json_output, \"w\") as f: json.dump(meta,", "for i in range(len(output_types)): key = f\"feature{i}\" feature_description[key] = tf.io.FixedLenFeature([], tf.string) def _parse_function(example_proto):", "these imports are needed so that eval can work from tensorflow import TensorShape", "bytes_feature(value): if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string", "args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)]", "\"\"\"Writes a tf.data.Dataset into a TFRecord file. Parameters ---------- dataset : ``tf.data.Dataset`` The", "Defaults to reading files sequentially. 
Returns ------- ``tf.data.Dataset`` A dataset that contains the", "int A `tf.int64` scalar representing the number of files to read in parallel.", "str or list Path to the TFRecord file. Pass a list if you", "---------- tfrecord : str or list Path to the TFRecord file. Pass a", "\"\"\"Utilities for TFRecords \"\"\" from __future__ import absolute_import from __future__ import division from", "Returns ------- ``tf.Operation`` A tf.Operation that, when run, writes contents of dataset to", "need the same map function. num_parallel_reads: int A `tf.int64` scalar representing the number", "features=feature_description ) args = tf.nest.flatten(args) args = [tf.io.parse_tensor(v, t) for v, t in", "import tensorflow as tf TFRECORDS_EXT = \".tfrecords\" def tfrecord_name_and_json_name(output): output = normalize_tfrecords_path(output) json_output", "structure so that we can read it back meta = { \"output_types\": repr(tf.compat.v1.data.get_output_types(dataset)),", "serialize_example_pyfunction(*args): feature = {} for i, f in enumerate(args): key = f\"feature{i}\" feature[key]", "sequentially. Returns ------- ``tf.data.Dataset`` A dataset that contains the data from the TFRecord", "file is also created. This json file is needed when you want to", "calling this function will write the file. Otherwise, you have to call session.run()", "the dataset def serialize_example_pyfunction(*args): feature = {} for i, f in enumerate(args): key", "dataset that contains the data from the TFRecord file. \"\"\" # these imports" ]
[ "# -*- coding: utf-8 -*- default_app_config = 'ebay_accounts.apps.EbayAccountsConfig' # vim: tabstop=4 expandtab shiftwidth=4", "<filename>ebay_accounts/__init__.py # -*- coding: utf-8 -*- default_app_config = 'ebay_accounts.apps.EbayAccountsConfig' # vim: tabstop=4 expandtab", "coding: utf-8 -*- default_app_config = 'ebay_accounts.apps.EbayAccountsConfig' # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8", "-*- coding: utf-8 -*- default_app_config = 'ebay_accounts.apps.EbayAccountsConfig' # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4" ]
[ "import json from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path", "file to be used for analysis\") args = parser.parse_args() if __name__ == \"__main__\":", "hierarchy[level] = { **hierarchy.get(level, {}), node_id: node } for level in sorted(hierarchy.keys()): print(f\"===", "len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \" + connected_to_str else: connected_to_str = \"\\tNo", "connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \" +", "= get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id: node } for level in", "if len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \" + connected_to_str else: connected_to_str =", "!= node_id and node_id in connections] return max(resolved_levels) if len(resolved_levels) > 0 else", "{} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"],", "\"\\tConnections: \" + connected_to_str else: connected_to_str = \"\\tNo connections\" print(connected_to_str) print(f\"\\tProperties: {nodes[node_id]['properties']}\") print()", "= argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation file to be", "\".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for connection in", "connected_id in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str)", "not exists\") with args.file.open() as f: data = json.load(f) connected_nodes = {} for", "for node_id, node in hierarchy[level].items(): 
print(node[\"name\"]) connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to", "if __name__ == \"__main__\": if not args.file.exists(): raise RuntimeError(\"File does not exists\") with", "not args.file.exists(): raise RuntimeError(\"File does not exists\") with args.file.open() as f: data =", "{}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id, start_level=0): resolved_levels =", "0 else start_level hierarchy = {} nodes = {} for node_id, node in", "for connected_id, connections in connected_nodes.items() if connected_id != node_id and node_id in connections]", "to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id,", "if not args.file.exists(): raise RuntimeError(\"File does not exists\") with args.file.open() as f: data", "if connected_id != node_id and node_id in connections] return max(resolved_levels) if len(resolved_levels) >", "node level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id: node } for", "= { **hierarchy.get(level, {}), node_id: node } for level in sorted(hierarchy.keys()): print(f\"=== LEVEL", "node } for level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id, node", "print(node[\"name\"]) connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in", "**connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id, start_level=0): resolved_levels", "connections in connected_nodes.items() if connected_id != node_id and node_id in connections] return max(resolved_levels)", "[]) + [connection] } def 
get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id,", "connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id,", "Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation file", "node_id: node } for level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id,", "for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], [])", "{level} ====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and \".join([", "connected_to_str = \"\\tConnections: \" + connected_to_str else: connected_to_str = \"\\tNo connections\" print(connected_to_str) print(f\"\\tProperties:", "level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id: node } for level", "connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def", "get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id: node } for level in sorted(hierarchy.keys()):", "[]) for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) > 0: connected_to_str", "node in dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level,", "connected_nodes = {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), 
connection[\"node2Id\"]:", "and node_id in connections] return max(resolved_levels) if len(resolved_levels) > 0 else start_level hierarchy", "hierarchy = {} nodes = {} for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] =", "= json.load(f) connected_nodes = {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"],", "__name__ == \"__main__\": if not args.file.exists(): raise RuntimeError(\"File does not exists\") with args.file.open()", "for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id) hierarchy[level] =", "print(f\"=== LEVEL {level} ====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \"", "node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']}", "start_level+1) for connected_id, connections in connected_nodes.items() if connected_id != node_id and node_id in", "= {} for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id)", "connected_id != node_id and node_id in connections] return max(resolved_levels) if len(resolved_levels) > 0", "get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if connected_id", "node_id and node_id in connections] return max(resolved_levels) if len(resolved_levels) > 0 else start_level", "= {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"],", "[get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if connected_id != node_id and node_id", "> 0: connected_to_str = \"\\tConnections: \" + connected_to_str 
else: connected_to_str = \"\\tNo connections\"", "{}).get(connected_id, []) ]) if len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \" + connected_to_str", "= {} nodes = {} for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] = node", "for analysis\") args = parser.parse_args() if __name__ == \"__main__\": if not args.file.exists(): raise", "in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection]", "from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline", "raise RuntimeError(\"File does not exists\") with args.file.open() as f: data = json.load(f) connected_nodes", "as f: data = json.load(f) connected_nodes = {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]]", "= parser.parse_args() if __name__ == \"__main__\": if not args.file.exists(): raise RuntimeError(\"File does not", "args.file.exists(): raise RuntimeError(\"File does not exists\") with args.file.open() as f: data = json.load(f)", "} for level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id, node in", "{nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id, [])", "\"__main__\": if not args.file.exists(): raise RuntimeError(\"File does not exists\") with args.file.open() as f:", "in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) >", "JSON representation file to be used for analysis\") args = parser.parse_args() if __name__", "if len(resolved_levels) > 0 else start_level hierarchy = {} nodes = {} for", "{} for node_id, node in 
dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id) hierarchy[level]", "> 0 else start_level hierarchy = {} nodes = {} for node_id, node", "does not exists\") with args.file.open() as f: data = json.load(f) connected_nodes = {}", "pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON", "exists\") with args.file.open() as f: data = json.load(f) connected_nodes = {} for connection", "= { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id,", "====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\"", "[]) ]) if len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \" + connected_to_str else:", "connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1)", "args.file.open() as f: data = json.load(f) connected_nodes = {} for connection in data[\"connections\"]:", "json from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to", "sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str =", "connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) > 0:", "in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \"", "start_level hierarchy = {} nodes = {} for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id]", "{}), node_id: node } for level in 
sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for", "for connected_id in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if", "node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\"", "f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id,", "= [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if connected_id != node_id and", "node_id in connections] return max(resolved_levels) if len(resolved_levels) > 0 else start_level hierarchy =", "analysis\") args = parser.parse_args() if __name__ == \"__main__\": if not args.file.exists(): raise RuntimeError(\"File", "pipeline JSON representation file to be used for analysis\") args = parser.parse_args() if", "+ [connection] } def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections", "connected_nodes.items() if connected_id != node_id and node_id in connections] return max(resolved_levels) if len(resolved_levels)", "metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation file to be used for analysis\")", "<reponame>ibaiGorordo/depthai-experiments import argparse import json from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file',", "connected_id, connections in connected_nodes.items() if connected_id != node_id and node_id in connections] return", "parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation file to be used for", "connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) > 0: connected_to_str = 
\"\\tConnections:", "type=Path, help=\"Path to pipeline JSON representation file to be used for analysis\") args", "json.load(f) connected_nodes = {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}),", "used for analysis\") args = parser.parse_args() if __name__ == \"__main__\": if not args.file.exists():", "return max(resolved_levels) if len(resolved_levels) > 0 else start_level hierarchy = {} nodes =", "argparse import json from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path,", "= \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, [])", "connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) +", "max(resolved_levels) if len(resolved_levels) > 0 else start_level hierarchy = {} nodes = {}", "data = json.load(f) connected_nodes = {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = {", "help=\"Path to pipeline JSON representation file to be used for analysis\") args =", "def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if", "representation file to be used for analysis\") args = parser.parse_args() if __name__ ==", "f: data = json.load(f) connected_nodes = {} for connection in data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] =", "nodes[node_id] = node level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id: node", "in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and \".join([ 
f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for", "RuntimeError(\"File does not exists\") with args.file.open() as f: data = json.load(f) connected_nodes =", "LEVEL {level} ====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and", "level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"])", "[connection] } def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in", "in connections] return max(resolved_levels) if len(resolved_levels) > 0 else start_level hierarchy = {}", "for level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id, node in hierarchy[level].items():", "\" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for", "to be used for analysis\") args = parser.parse_args() if __name__ == \"__main__\": if", "{ **hierarchy.get(level, {}), node_id: node } for level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level}", "dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id:", "to pipeline JSON representation file to be used for analysis\") args = parser.parse_args()", "else start_level hierarchy = {} nodes = {} for node_id, node in dict(data[\"nodes\"]).items():", "connections] return max(resolved_levels) if len(resolved_levels) > 0 else start_level hierarchy = {} nodes", "\\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ])", "]) if len(connected_to_str) > 0: connected_to_str = \"\\tConnections: \" + connected_to_str else: connected_to_str", "import 
argparse import json from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\",", "{ **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id, start_level=0):", "**hierarchy.get(level, {}), node_id: node } for level in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\")", "hierarchy[level].items(): print(node[\"name\"]) connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id", "nodes = {} for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] = node level =", "{} nodes = {} for node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] = node level", "parser.parse_args() if __name__ == \"__main__\": if not args.file.exists(): raise RuntimeError(\"File does not exists\")", "in dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}),", "with args.file.open() as f: data = json.load(f) connected_nodes = {} for connection in", "data[\"connections\"]: connected_nodes[connection[\"node1Id\"]] = { **connected_nodes.get(connection[\"node1Id\"], {}), connection[\"node2Id\"]: connected_nodes.get(connection[\"node1Id\"], {}).get(connection[\"node2Id\"], []) + [connection] }", "resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if connected_id != node_id", "be used for analysis\") args = parser.parse_args() if __name__ == \"__main__\": if not", "= node level = get_level(node_id) hierarchy[level] = { **hierarchy.get(level, {}), node_id: node }", "connected_to_str = \" and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in 
connected_nodes.get(node_id,", "= \"\\tConnections: \" + connected_to_str else: connected_to_str = \"\\tNo connections\" print(connected_to_str) print(f\"\\tProperties: {nodes[node_id]['properties']}\")", "{}).get(connection[\"node2Id\"], []) + [connection] } def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for", "for connection in connected_nodes.get(node_id, {}).get(connected_id, []) ]) if len(connected_to_str) > 0: connected_to_str =", "} def get_level(node_id, start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items()", "import Path parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation", "0: connected_to_str = \"\\tConnections: \" + connected_to_str else: connected_to_str = \"\\tNo connections\" print(connected_to_str)", "in sorted(hierarchy.keys()): print(f\"=== LEVEL {level} ====\") for node_id, node in hierarchy[level].items(): print(node[\"name\"]) connected_to_str", "start_level=0): resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if connected_id !=", "node_id, node in dict(data[\"nodes\"]).items(): nodes[node_id] = node level = get_level(node_id) hierarchy[level] = {", "args = parser.parse_args() if __name__ == \"__main__\": if not args.file.exists(): raise RuntimeError(\"File does", "and \".join([ f\"\\\"{connection['node1Output']}\\\" to {nodes[connected_id]['name']} \\\"{connection['node2Input']}\\\"\" for connected_id in connected_nodes.get(node_id, []) for connection", "== \"__main__\": if not args.file.exists(): raise RuntimeError(\"File does not exists\") with args.file.open() as", "parser = argparse.ArgumentParser() parser.add_argument('file', metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation file to", "argparse.ArgumentParser() parser.add_argument('file', 
metavar=\"FILE\", type=Path, help=\"Path to pipeline JSON representation file to be used", "len(resolved_levels) > 0 else start_level hierarchy = {} nodes = {} for node_id,", "in connected_nodes.items() if connected_id != node_id and node_id in connections] return max(resolved_levels) if" ]
[ "'Mr. Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom validator to check", "= StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'),", "_set_score_check(form, field): score = field.data if score not in ['-1', '0', '1', '2',", "of character strings for all possible characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth',", "from app.models import * import re # list of all Regions regionlist =", "fieldname: raise ValidationError(message) return _not_equal_to # custom validator to check that Set score", "samei # Form generated when looking to search for head to head results", "SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score", "integer>=-1 and <10, or as a W/L character\" def _set_score_check(form, field): score =", "remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag", "to head results between players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 =", "RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character filter form in /browse_users class", "UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters", "Character filter form in /browse_users class CharacterFilter(Form): character_name = SelectField('character_name', choices=[('Main', 'Main')] +", "= StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = 
StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score',", "from app import app, db from app.models import * import re # list", "choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain", "def _set_score_check(form, field): score = field.data if score not in ['-1', '0', '1',", "('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data", "SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select region form class RegionSelect(Form): region_name =", "a DQ value (-1), or is a 'W' or 'L' char def set_score_check():", "'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list of character strings", "form in /browse_users class CharacterFilter(Form): character_name = SelectField('character_name', choices=[('Main', 'Main')] + character_choices, coerce=str)", "'Mr. Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] #", "InputRequired, Required, ValidationError, StopValidation from app import app, db from app.models import *", "or is a 'W' or 'L' char def set_score_check(): message = \"You must", "validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()])", "'Mewtwo', 'Mr. Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom validator to", "validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region", "appears in the dropdown menu. In this case, both should be the samei", "(value, label) is the actual value. 
The label is what appears in the", "raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region = StringField('region',", "raise ValidationError(message) return _not_equal_to # custom validator to check that Set score can", "Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple", "'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] # characters", "BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream", "'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain", "region form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character filter form", "a W/L character\" def _set_score_check(form, field): score = field.data if score not in", "choices=regionlist, coerce=str) # Character filter form in /browse_users class CharacterFilter(Form): character_name = SelectField('character_name',", "coerce=str) # Character filter form in /browse_users class CharacterFilter(Form): character_name = SelectField('character_name', choices=[('Main',", "characters choice list for SelectField; a constant list taken by SelectField containing only", "taken by SelectField containing only the 26 SSBM Characters character_choices = [('Fox', 'Fox'),", "'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] # characters choice list for", "'3', '4', '5', '6', '7', '8', '9', 'W', 'L']: raise ValidationError(message) return _set_score_check", "_not_equal_to(form, field, fieldname): if form.field == fieldname: raise ValidationError(message) return _not_equal_to # custom", "SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream 
Land'), ('Final Destination', 'Final Destination'),", "not in ['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',", "= StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament =", "label is what appears in the dropdown menu. In this case, both should", "SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff',", "= \"Winner and Loser can't be the same!\" def _not_equal_to(form, field, fieldname): if", "IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info')", "['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'W', 'L']:", "if score not in ['-1', '0', '1', '2', '3', '4', '5', '6', '7',", "for choices: The first (value, label) is the actual value. 
The label is", "('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] # characters choice", "'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list of character strings for all", "an integer, is a DQ value (-1), or is a 'W' or 'L'", "= SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info')", "validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'),", "import Form, validators from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from", "class MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'),", "two (User.tag) fields are not the same. This function format allows for other", "SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation from app", "['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. Mario', 'Pikachu',", "user_tag = StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str)", "('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. 
Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf',", "only the 26 SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'),", "match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination',", "= [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'),", "'North Carolina')] # characters choice list for SelectField; a constant list taken by", "list for SelectField; a constant list taken by SelectField containing only the 26", "= StringField('user2', validators=[DataRequired()]) # search form in navigation bar class SearchForm(Form): search =", "edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices", "SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for choices:", "Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'),", "between players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) #", "character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach',", "same. 
This function format allows for other parameters besides (form, field) def not_equal_to(fieldname):", "players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search", "region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character filter form in /browse_users class CharacterFilter(Form):", "to an integer, is a DQ value (-1), or is a 'W' or", "StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score',", "('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon", "validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices =", "choice list for SelectField; a constant list taken by SelectField containing only the", "re # list of all Regions regionlist = [('Global', 'Global'), ('National', 'National'), ('New", "('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game and Watch', 'Mr. Game and Watch'), ('Ness',", "app, db from app.models import * import re # list of all Regions", "import re # list of all Regions regionlist = [('Global', 'Global'), ('National', 'National'),", "return _set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters", "= SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char", "'<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. 
Game and Watch',", "'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr.", "Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo',", "as a W/L character\" def _set_score_check(form, field): score = field.data if score not", "Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other',", "W/L character\" def _set_score_check(form, field): score = field.data if score not in ['-1',", "Story'), ('Other', 'Other')], coerce=str) # Data not required in case no match info", "StopValidation from app import app, db from app.models import * import re #", "('Dream Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of", "or as a W/L character\" def _set_score_check(form, field): score = field.data if score", "character strings for all possible characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff',", "label) is the actual value. The label is what appears in the dropdown", "bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select region form class RegionSelect(Form):", "navigation bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select region form class", "search = StringField('search', validators=[InputRequired()]) # select region form class RegionSelect(Form): region_name = SelectField('region_name',", "validator to check that Set score can be converted to an integer, is", "score as an integer>=-1 and <10, or as a W/L character\" def _set_score_check(form,", "to check if two (User.tag) fields are not the same. 
This function format", "'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'),", "= StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search form in navigation bar", "'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom validator to check if two (User.tag) fields", "Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data not", "the Set score as an integer>=-1 and <10, or as a W/L character\"", "'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'W', 'L']: raise", "must submit the Set score as an integer>=-1 and <10, or as a", "validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form):", "case no match info is known (no validators for fields) match_winner = SelectField('match_winner',", "Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'),", "edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count", "This function format allows for other parameters besides (form, field) def not_equal_to(fieldname): message", "user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search form in navigation", "converted to an integer, is a DQ value (-1), or is a 'W'", "SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) #", "= IntegerField('winner_score', 
validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info =", "wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation from app import app, db from", "import * import re # list of all Regions regionlist = [('Global', 'Global'),", "to search for head to head results between players class HeadToHead(Form): user1 =", "In this case, both should be the samei # Form generated when looking", "set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()])", "('Other', 'Other')], coerce=str) # Data not required in case no match info is", "field.data if score not in ['-1', '0', '1', '2', '3', '4', '5', '6',", "validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()])", "Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story',", "('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game and Watch', 'Mr. Game and", "('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list of character strings for all possible", "'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] #", "SelectField('region_name', choices=regionlist, coerce=str) # Character filter form in /browse_users class CharacterFilter(Form): character_name =", "'Ice Climbers', 'Dr. 
Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi',", "info is known (no validators for fields) match_winner = SelectField('match_winner', coerce=str) match_loser =", "be converted to an integer, is a DQ value (-1), or is a", "'W' or 'L' char def set_score_check(): message = \"You must submit the Set", "validators=[InputRequired()]) # select region form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) #", "= [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of", "can't be the same!\" def _not_equal_to(form, field, fieldname): if form.field == fieldname: raise", "('Mr. Game and Watch', 'Mr. Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu',", "Form generated when looking to search for head to head results between players", "Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data not required in case no", "char def set_score_check(): message = \"You must submit the Set score as an", "'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario',", "SelectField containing only the 26 SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'),", "== fieldname: raise ValidationError(message) return _not_equal_to # custom validator to check that Set", "SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class", "edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score", "'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi',", "value. 
The label is what appears in the dropdown menu. In this case,", "both should be the samei # Form generated when looking to search for", "('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s", "<10, or as a W/L character\" def _set_score_check(form, field): score = field.data if", "StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(),", "should be the samei # Form generated when looking to search for head", "'Pichu'), ('Kirby', 'Kirby')] # simple list of character strings for all possible characters", "IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation from app import", "('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice", "StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')],", "'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data not required in case no match", "validators=[DataRequired()]) # search form in navigation bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()])", "def not_equal_to(fieldname): message = \"Winner and Loser can't be the same!\" def _not_equal_to(form,", "choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament", "Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'),", "# Character filter form in /browse_users class CharacterFilter(Form): character_name = SelectField('character_name', choices=[('Main', 
'Main')]", "'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game and Watch', 'Mr. Game and Watch'), ('Ness', 'Ness'),", "for head to head results between players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()])", "Data not required in case no match info is known (no validators for", "for other parameters besides (form, field) def not_equal_to(fieldname): message = \"Winner and Loser", "function format allows for other parameters besides (form, field) def not_equal_to(fieldname): message =", "not the same. This function format allows for other parameters besides (form, field)", "is a DQ value (-1), or is a 'W' or 'L' char def", "coerce=str) # Data not required in case no match info is known (no", "for SelectField; a constant list taken by SelectField containing only the 26 SSBM", "def set_score_check(): message = \"You must submit the Set score as an integer>=-1", "('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr.", "validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()])", "Regions regionlist = [('Global', 'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'),", "no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag", "[('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams',", "'Roy', 'Mewtwo', 'Mr. 
Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom validator", "= SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region',", "'Kirby')] # simple list of character strings for all possible characters character_list =", "coerce=str) # SelectField format for choices: The first (value, label) is the actual", "Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'),", "('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data not required in case", "submit the Set score as an integer>=-1 and <10, or as a W/L", "'Other')], coerce=str) # Data not required in case no match info is known", "check that Set score can be converted to an integer, is a DQ", "'SoCal'), ('North Carolina', 'North Carolina')] # characters choice list for SelectField; a constant", "'Pichu', 'Kirby'] # custom validator to check if two (User.tag) fields are not", "search for head to head results between players class HeadToHead(Form): user1 = StringField('user1',", "list taken by SelectField containing only the 26 SSBM Characters character_choices = [('Fox',", "'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'),", "fields are not the same. This function format allows for other parameters besides", "'W', 'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region", "Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'),", "Falcon', 'Ice Climbers', 'Dr. 
Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>',", "field): score = field.data if score not in ['-1', '0', '1', '2', '3',", "('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr.", "'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. Game and Watch',", "set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score =", "user2 = StringField('user2', validators=[DataRequired()]) # search form in navigation bar class SearchForm(Form): search", "= StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters')", "('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link',", "coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters =", "= BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land',", "generated when looking to search for head to head results between players class", "in ['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'W',", "custom validator to check that Set score can be converted to an integer,", "and <10, or as a W/L character\" def _set_score_check(form, field): score = field.data", "# search form in navigation bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) #", "Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'),", 
"England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] # characters choice list", "'6', '7', '8', '9', 'W', 'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag", "for all possible characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain", "SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()])", "SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation from app import app,", "(User.tag) fields are not the same. This function format allows for other parameters", "Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium',", "is known (no validators for fields) match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser',", "StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()])", "'<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. 
Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby']", "[('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament')", "field) def not_equal_to(fieldname): message = \"Winner and Loser can't be the same!\" def", "when looking to search for head to head results between players class HeadToHead(Form):", "StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search form in navigation bar class", "# custom validator to check that Set score can be converted to an", "'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. Game and", "DQ value (-1), or is a 'W' or 'L' char def set_score_check(): message", "class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character filter form in /browse_users", "in navigation bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select region form", "StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score =", "an integer>=-1 and <10, or as a W/L character\" def _set_score_check(form, field): score", "set_score_check()]) set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info", "'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. Game and Watch', 'Ness', 'Bowser', 'Pichu',", "parameters besides (form, field) def not_equal_to(fieldname): message = \"Winner and Loser can't be", "MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final", "the same. 
This function format allows for other parameters besides (form, field) def", "Set score can be converted to an integer, is a DQ value (-1),", "Game and Watch', 'Mr. Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'),", "= StringField('search', validators=[InputRequired()]) # select region form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist,", "Loser can't be the same!\" def _not_equal_to(form, field, fieldname): if form.field == fieldname:", "Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'),", "choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for choices: The", "validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()])", "Required, ValidationError, StopValidation from app import app, db from app.models import * import", "validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag", "SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()])", "* import re # list of all Regions regionlist = [('Global', 'Global'), ('National',", "'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. 
Game and Watch', 'Ness',", "= IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage", "custom validator to check if two (User.tag) fields are not the same. This", "import DataRequired, InputRequired, Required, ValidationError, StopValidation from app import app, db from app.models", "('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list of character strings for", "score not in ['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8',", "'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu', 'Pikachu'), ('Samus',", "coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField", "validator to check if two (User.tag) fields are not the same. This function", "be the same!\" def _not_equal_to(form, field, fieldname): if form.field == fieldname: raise ValidationError(message)", "the same!\" def _not_equal_to(form, field, fieldname): if form.field == fieldname: raise ValidationError(message) return", "BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation", "add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag =", "SelectField format for choices: The first (value, label) is the actual value. 
The", "case, both should be the samei # Form generated when looking to search", "[('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain", "StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()])", "choices=character_choices, coerce=str) # SelectField format for choices: The first (value, label) is the", "app.models import * import re # list of all Regions regionlist = [('Global',", "if two (User.tag) fields are not the same. This function format allows for", "the actual value. The label is what appears in the dropdown menu. In", "head to head results between players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2", "winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format", "and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list", "edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream", "set_score_check(): message = \"You must submit the Set score as an integer>=-1 and", "StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices", "'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers',", "'<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. 
Game and Watch', 'Ness', 'Bowser',", "'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>',", "'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon", "user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region =", "('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] #", "Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list of", "choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters", "\"You must submit the Set score as an integer>=-1 and <10, or as", "'5', '6', '7', '8', '9', 'W', 'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form):", "StringField('search', validators=[InputRequired()]) # select region form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str)", "and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom validator to check if two", "select region form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character filter", "field, fieldname): if form.field == fieldname: raise ValidationError(message) return _not_equal_to # custom validator", "strings for all possible characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach',", "class UserEdit(Form): edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters')", "= StringField('tournament') edit_winner_tag = StringField('winner_tag', 
validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score',", "(no validators for fields) match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char", "'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'),", "ValidationError(message) return _set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()])", "Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom validator to check if", "coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for choices: The first", "= StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class", "message = \"You must submit the Set score as an integer>=-1 and <10,", "= field.data if score not in ['-1', '0', '1', '2', '3', '4', '5',", "search form in navigation bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select", "looking to search for head to head results between players class HeadToHead(Form): user1", "is a 'W' or 'L' char def set_score_check(): message = \"You must submit", "'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'),", "format allows for other parameters besides (form, field) def not_equal_to(fieldname): message = \"Winner", "format for choices: The first (value, label) is the actual value. 
The label", "fields) match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices,", "Carolina')] # characters choice list for SelectField; a constant list taken by SelectField", "the samei # Form generated when looking to search for head to head", "'9', 'W', 'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()])", "set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(),", "DataRequired, InputRequired, Required, ValidationError, StopValidation from app import app, db from app.models import", "= StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count =", "of all Regions regionlist = [('Global', 'Global'), ('National', 'National'), ('New England', 'New England'),", "('Mewtwo', 'Mewtwo'), ('Mr. Game and Watch', 'Mr. Game and Watch'), ('Ness', 'Ness'), ('Bowser',", "('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag", "value (-1), or is a 'W' or 'L' char def set_score_check(): message =", "score = field.data if score not in ['-1', '0', '1', '2', '3', '4',", "Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link',", "('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. 
Game and Watch', 'Mr.", "StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form):", "= ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. Mario',", "'8', '9', 'W', 'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag = StringField('tag',", "The label is what appears in the dropdown menu. In this case, both", "from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired,", "The first (value, label) is the actual value. The label is what appears", "('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr.", "('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain", "'7', '8', '9', 'W', 'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag =", "character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr.", "edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score =", "StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament')", "possible characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice", "# Form generated when looking to search for head to head results between", "set_loser_score = StringField('loser_score', 
validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'),", "validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count =", "BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag',", "'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game", "edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info", "# custom validator to check if two (User.tag) fields are not the same.", "return _not_equal_to # custom validator to check that Set score can be converted", "not_equal_to(fieldname): message = \"Winner and Loser can't be the same!\" def _not_equal_to(form, field,", "('North Carolina', 'North Carolina')] # characters choice list for SelectField; a constant list", "form in navigation bar class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select region", "('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young", "message = \"Winner and Loser can't be the same!\" def _not_equal_to(form, field, fieldname):", "'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')],", "# characters choice list for SelectField; a constant list taken by SelectField containing", "StringField('loser_tag', validators=[DataRequired()]) set_winner_score = 
StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count", "in case no match info is known (no validators for fields) match_winner =", "match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices,", "ValidationError, StopValidation from app import app, db from app.models import * import re", "('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game and", "# list of all Regions regionlist = [('Global', 'Global'), ('National', 'National'), ('New England',", "check if two (User.tag) fields are not the same. This function format allows", "edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form):", "('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu',", "= SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for choices: The first (value, label)", "filter form in /browse_users class CharacterFilter(Form): character_name = SelectField('character_name', choices=[('Main', 'Main')] + character_choices,", "SelectField; a constant list taken by SelectField containing only the 26 SSBM Characters", "Climbers', 'Dr. 
Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda',", "StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count',", "match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str)", "= SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag',", "not required in case no match info is known (no validators for fields)", "('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] # characters choice list for SelectField; a", "Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu', 'Pikachu'),", "besides (form, field) def not_equal_to(fieldname): message = \"Winner and Loser can't be the", "'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')]", "choices: The first (value, label) is the actual value. The label is what", "HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search form in", "IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices = [('Battlefield',", "simple list of character strings for all possible characters character_list = ['Fox', 'Falco',", "'Zelda', 'Roy', 'Mewtwo', 'Mr. 
Game and Watch', 'Ness', 'Bowser', 'Pichu', 'Kirby'] # custom", "class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()])", "from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation from app import app, db", "dropdown menu. In this case, both should be the samei # Form generated", "import app, db from app.models import * import re # list of all", "validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score',", "is the actual value. The label is what appears in the dropdown menu.", "no match info is known (no validators for fields) match_winner = SelectField('match_winner', coerce=str)", "other parameters besides (form, field) def not_equal_to(fieldname): message = \"Winner and Loser can't", "'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'),", "list of all Regions regionlist = [('Global', 'Global'), ('National', 'National'), ('New England', 'New", "what appears in the dropdown menu. In this case, both should be the", "set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:', choices = [('1','1'),", "ValidationError(message) return _not_equal_to # custom validator to check that Set score can be", "Set score as an integer>=-1 and <10, or as a W/L character\" def", "'Bowser', 'Pichu', 'Kirby'] # custom validator to check if two (User.tag) fields are", "'Captain Falcon', 'Ice Climbers', 'Dr. 
Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link',", "= StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score", "Form, validators from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators", "wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired,", "list of character strings for all possible characters character_list = ['Fox', 'Falco', 'Sheik',", "StringField('user2', validators=[DataRequired()]) # search form in navigation bar class SearchForm(Form): search = StringField('search',", "fieldname): if form.field == fieldname: raise ValidationError(message) return _not_equal_to # custom validator to", "'Mewtwo'), ('Mr. Game and Watch', 'Mr. Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'),", "(-1), or is a 'W' or 'L' char def set_score_check(): message = \"You", "of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'),", "('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy',", "validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag", "a constant list taken by SelectField containing only the 26 SSBM Characters character_choices", "'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game and Watch', 'Mr. 
Game and Watch'),", "# Data not required in case no match info is known (no validators", "set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best", "_not_equal_to # custom validator to check that Set score can be converted to", "= \"You must submit the Set score as an integer>=-1 and <10, or", "'1', '2', '3', '4', '5', '6', '7', '8', '9', 'W', 'L']: raise ValidationError(message)", "'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy', 'Mewtwo', 'Mr. Game", "first (value, label) is the actual value. The label is what appears in", "\"Winner and Loser can't be the same!\" def _not_equal_to(form, field, fieldname): if form.field", "= SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag =", "db from app.models import * import re # list of all Regions regionlist", "all Regions regionlist = [('Global', 'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal',", "menu. In this case, both should be the samei # Form generated when", "'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo',", "Mario', 'Dr. 
Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'),", "'Kirby'] # custom validator to check if two (User.tag) fields are not the", "of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str)", "match info is known (no validators for fields) match_winner = SelectField('match_winner', coerce=str) match_loser", "allows for other parameters besides (form, field) def not_equal_to(fieldname): message = \"Winner and", "'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s", "this case, both should be the samei # Form generated when looking to", "('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'), ('Zelda',", "= [('Global', 'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina',", "validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search form in navigation bar class SearchForm(Form):", "if form.field == fieldname: raise ValidationError(message) return _not_equal_to # custom validator to check", "set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info =", "= [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament =", "known (no validators for fields) match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str)", "validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage',", "= SelectField('match_loser', coerce=str) winner_char = 
SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str)", "('Kirby', 'Kirby')] # simple list of character strings for all possible characters character_list", "integer, is a DQ value (-1), or is a 'W' or 'L' char", "are not the same. This function format allows for other parameters besides (form,", "= IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage = SelectField('match_stage', choices =", "or 'L' char def set_score_check(): message = \"You must submit the Set score", "UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices,", "= StringField('loser_tag', validators=[DataRequired()]) set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()])", "user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag =", "('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice", "'2', '3', '4', '5', '6', '7', '8', '9', 'W', 'L']: raise ValidationError(message) return", "'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'),", "a 'W' or 'L' char def set_score_check(): message = \"You must submit the", "IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class MatchSubmit(Form): match_stage =", 
"head results between players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2',", "import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required,", "# simple list of character strings for all possible characters character_list = ['Fox',", "edit_tag = StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters =", "'Ice Climbers'), ('Dr. Mario', 'Dr. Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi',", "in the dropdown menu. In this case, both should be the samei #", "StringField('tag', validators=[DataRequired()]) edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class", "and Watch', 'Mr. 
Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby',", "'4', '5', '6', '7', '8', '9', 'W', 'L']: raise ValidationError(message) return _set_score_check class", "Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data not required in", "('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>',", "form.field == fieldname: raise ValidationError(message) return _not_equal_to # custom validator to check that", "Carolina', 'North Carolina')] # characters choice list for SelectField; a constant list taken", "from flask.ext.wtf import Form, validators from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField,", "required in case no match info is known (no validators for fields) match_winner", "= SelectField('region_name', choices=regionlist, coerce=str) # Character filter form in /browse_users class CharacterFilter(Form): character_name", "constant list taken by SelectField containing only the 26 SSBM Characters character_choices =", "'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi', 'Yoshi'),", "'L']: raise ValidationError(message) return _set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region =", "coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char',", "by SelectField containing only the 26 SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco',", "('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag =", "the 26 SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth',", "Watch', 'Ness', 
'Bowser', 'Pichu', 'Kirby'] # custom validator to check if two (User.tag)", "actual value. The label is what appears in the dropdown menu. In this", "is what appears in the dropdown menu. In this case, both should be", "score can be converted to an integer, is a DQ value (-1), or", "'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'),", "'Dr. Mario', 'Pikachu', 'Samus', 'Ganondorf', 'Luigi', 'Mario', '<NAME>', 'Link', '<NAME>', 'Yoshi', 'Zelda', 'Roy',", "results between players class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()])", "the dropdown menu. In this case, both should be the samei # Form", "'Dr. Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young", "= SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination', 'Final", "SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for choices: The first (value, label) is", "as an integer>=-1 and <10, or as a W/L character\" def _set_score_check(form, field):", "Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) #", "'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. Mario', 'Pikachu', 'Samus', 'Ganondorf',", "'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers', 'Dr. 
Mario', 'Pikachu', 'Samus',", "loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for choices: The first (value,", "for fields) match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char',", "[('Global', 'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North", "England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')] # characters choice list for SelectField;", "= SelectField('winner_char', choices=character_choices, coerce=str) loser_char = SelectField('loser_char', choices=character_choices, coerce=str) # SelectField format for", "class HeadToHead(Form): user1 = StringField('user1', validators=[DataRequired()]) user2 = StringField('user2', validators=[DataRequired()]) # search form", "SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char = SelectField('winner_char', choices=character_choices, coerce=str) loser_char =", "= StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()]) edit_winner_score = IntegerField('winner_score', validators=[InputRequired()]) edit_loser_score =", "= StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str) class UserEdit(Form): edit_tag = StringField('tag',", "= BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag =", "form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character filter form in", "def _not_equal_to(form, field, fieldname): if form.field == fieldname: raise ValidationError(message) return _not_equal_to #", "<filename>app/forms.py from 
flask.ext.wtf import Form, validators from wtforms import StringField, BooleanField, TextAreaField, SelectField,", "validators=[InputRequired()]) edit_loser_score = IntegerField('loser_score', validators=[InputRequired()]) edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()]) edit_match_info = BooleanField('edit_match_info') class", "Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'),", "'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'), ('Jigglypuff', 'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon',", "('Luigi', 'Luigi'), ('Mario', 'Mario'), ('Young Link', 'Young Link'), ('Link', 'Link'), ('<NAME>', '<NAME>'), ('Yoshi',", "26 SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik', 'Sheik'), ('Marth', 'Marth'),", "edit_region = StringField('region', validators=[DataRequired()]) add_characters = SelectMultipleField('add_characters') remove_characters = SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament", "and Loser can't be the same!\" def _not_equal_to(form, field, fieldname): if form.field ==", "# SelectField format for choices: The first (value, label) is the actual value.", "that Set score can be converted to an integer, is a DQ value", "# select region form class RegionSelect(Form): region_name = SelectField('region_name', choices=regionlist, coerce=str) # Character", "StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required, ValidationError,", "regionlist = [('Global', 'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North", "class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region = StringField('region', validators=[DataRequired()]) user_characters = SelectMultipleField('characters',", "'Pokemon Stadium'), 
('Yoshi\\'s Story', 'Yoshi\\'s Story'), ('Other', 'Other')], coerce=str) # Data not required", "app import app, db from app.models import * import re # list of", "class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag', validators=[DataRequired()]) edit_loser_tag = StringField('loser_tag', validators=[DataRequired()])", "all possible characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon',", "SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag', validators=[DataRequired()]) set_winner_score", "characters character_list = ['Fox', 'Falco', 'Sheik', 'Marth', 'Jigglypuff', 'Peach', 'Captain Falcon', 'Ice Climbers',", "= StringField('winner_score', validators=[DataRequired(), set_score_check()]) set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()]) set_max_match_count = SelectField('Best of:',", "of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form):", "'Yoshi'), ('Zelda', 'Zelda'), ('Roy', 'Roy'), ('Mewtwo', 'Mewtwo'), ('Mr. Game and Watch', 'Mr. Game", "validators from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import", "('Dr. Mario', 'Dr. 
Mario'), ('Pikachu', 'Pikachu'), ('Samus', 'Samus'), ('Ganondorf', 'Ganondorf'), ('Luigi', 'Luigi'), ('Mario',", "'L' char def set_score_check(): message = \"You must submit the Set score as", "('7','7')], validators=[Required()]) no_match_info = BooleanField('no_match_info') class SetEdit(Form): edit_tournament = StringField('tournament') edit_winner_tag = StringField('winner_tag',", "flask.ext.wtf import Form, validators from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField", "containing only the 26 SSBM Characters character_choices = [('Fox', 'Fox'), ('Falco', 'Falco'), ('Sheik',", "(form, field) def not_equal_to(fieldname): message = \"Winner and Loser can't be the same!\"", "same!\" def _not_equal_to(form, field, fieldname): if form.field == fieldname: raise ValidationError(message) return _not_equal_to", "'Jigglypuff'), ('Peach', 'Peach'), ('Captain Falcon', 'Captain Falcon'), ('Ice Climbers', 'Ice Climbers'), ('Dr. Mario',", "('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')] # simple list of character", "class SearchForm(Form): search = StringField('search', validators=[InputRequired()]) # select region form class RegionSelect(Form): region_name", "TextAreaField, SelectField, IntegerField, SelectMultipleField from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation from", "can be converted to an integer, is a DQ value (-1), or is", "to check that Set score can be converted to an integer, is a", "be the samei # Form generated when looking to search for head to", "character\" def _set_score_check(form, field): score = field.data if score not in ['-1', '0',", "validators for fields) match_winner = SelectField('match_winner', coerce=str) match_loser = SelectField('match_loser', coerce=str) winner_char =", "_set_score_check class UserCreate(Form): user_tag = StringField('tag', validators=[DataRequired()]) user_region = 
StringField('region', validators=[DataRequired()]) user_characters =", "SelectMultipleField('remove_characters') class SetCreate(Form): set_tournament = StringField('tournament') set_winner_tag = StringField('winner_tag', validators=[DataRequired()]) set_loser_tag = StringField('loser_tag',", "Watch', 'Mr. Game and Watch'), ('Ness', 'Ness'), ('Bowser', 'Bowser'), ('Pichu', 'Pichu'), ('Kirby', 'Kirby')]" ]
[ "_forward(self, x, train=False): y = self.network.l1(x) return y def _loss_func(self, y, t): return", "LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y =", "from . import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1))", "train=False): y = self.network.l1(x) return y def _loss_func(self, y, t): return F.mean_squared_error(y, t)", "y = self.network.l1(x) return y def _loss_func(self, y, t): return F.mean_squared_error(y, t) class", "return y def _loss_func(self, y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self,", "<filename>skchainer/linear.py __author__ = 'du' from chainer import Chain, functions as F from .", "import Chain, functions as F from . import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def", "ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x,", "'du' from chainer import Chain, functions as F from . import ChainerRegresser, ChainerClassifier", "as F from . 
import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return", "x, train=False): y = self.network.l1(x) return y def _loss_func(self, y, t): return F.mean_squared_error(y,", "**params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y = self.network.l1(x) return y", "import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self,", "self.network.l1(x) return y def _loss_func(self, y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def", "return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self,", ". import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def", "y def _loss_func(self, y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params):", "ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False):", "return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y = self.network.l1(x) return y def", "__author__ = 'du' from chainer import Chain, functions as F from . 
import", "x, train=False): y = self.network.l1(x) return y def _loss_func(self, y, t): return F.softmax_cross_entropy(y,", "**params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y = self.network.l1(x) return y", "def _forward(self, x, train=False): y = self.network.l1(x) return y def _loss_func(self, y, t):", "= 'du' from chainer import Chain, functions as F from . import ChainerRegresser,", "from chainer import Chain, functions as F from . import ChainerRegresser, ChainerClassifier class", "_setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y = self.network.l1(x) return", "F from . import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"],", "_loss_func(self, y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"],", "Chain, functions as F from . 
import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self,", "1)) def _forward(self, x, train=False): y = self.network.l1(x) return y def _loss_func(self, y,", "LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y =", "def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y = self.network.l1(x)", "y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"]))", "class LinearRegression(ChainerRegresser): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y", "params[\"n_classes\"])) def _forward(self, x, train=False): y = self.network.l1(x) return y def _loss_func(self, y,", "def _loss_func(self, y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return", "F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x,", "Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y = self.network.l1(x) return y def _loss_func(self,", "def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y = self.network.l1(x)", "functions as F from . 
import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser): def _setup_network(self, **params):", "_setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y = self.network.l1(x) return", "train=False): y = self.network.l1(x) return y def _loss_func(self, y, t): return F.softmax_cross_entropy(y, t)", "chainer import Chain, functions as F from . import ChainerRegresser, ChainerClassifier class LinearRegression(ChainerRegresser):", "t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def", "return Chain(l1=F.Linear(params[\"n_dim\"], 1)) def _forward(self, x, train=False): y = self.network.l1(x) return y def", "class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y", "= self.network.l1(x) return y def _loss_func(self, y, t): return F.mean_squared_error(y, t) class LogisticRegression(ChainerClassifier):", "t) class LogisticRegression(ChainerClassifier): def _setup_network(self, **params): return Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False):", "Chain(l1=F.Linear(params[\"n_dim\"], params[\"n_classes\"])) def _forward(self, x, train=False): y = self.network.l1(x) return y def _loss_func(self," ]
[ "int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role == Qt.ForegroundRole:", "return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx in (0, 2): return Qt.AlignRight", "self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24 self.mnemonic_words:", "QTimer, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication,", "def set_read_only(self, ro): self.read_only = ro def columnCount(self, parent=None, *args, **kwargs): return len(self.columns)", "return self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0 <= row_idx < int(self.words_count /", "ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except", "QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut,", "row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role", "== 3 and row_idx < int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count / 2)", "clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self):", "= 24 self.mnemonic_words: List[str] = [\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model =", "action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on", "self.read_only = ro def columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def rowCount(self, parent=None,", "ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = 
QApplication.clipboard()", "QMenu, QShortcut, QApplication, QLabel from mnemonic import Mnemonic from wnd_utils import WndUtils class", "col_idx = index.column() if col_idx in (1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable", "int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0 <= row_idx", "self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the complete set of seed words into", "idx = row_idx else: idx = row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] =", "self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent", "col_idx < len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return", "= row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] = data return True def flags(self,", "self.columns[section] return '' else: return ' ' def setData(self, index, data, role=None): row_idx", "elif col_idx == 3: if 0 <= row_idx < int(self.words_count / 2): return", "mnemonic import Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self,", "if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2) +", "int((width - (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2,", "== 1: idx = row_idx else: idx = row_idx + int(self.words_count / 2)", "= Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable else: ret =", "QApplication.clipboard() if clipboard: ws_str = clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', '", "idx = row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] = data return True def", "return str(row_idx + 1) + '.' 
elif col_idx == 2: return str(int(self.words_count /", "col_idx == 3: if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count", "import QWidget, QMenu, QShortcut, QApplication, QLabel from mnemonic import Mnemonic from wnd_utils import", "commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid", "1): word_col_idx = 1 else: word_col_idx = 3 if word_col_idx == 1: word", "role=None): if index.isValid(): col_idx = index.column() row_idx = index.row() if col_idx < len(self.columns):", "QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx in (0, 2): return Qt.AlignRight elif", "from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint,", "SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] =", "+ 1) + '.' 
elif col_idx == 2: return str(int(self.words_count / 2) +", "mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list", "'\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def", "return QVariant() if orientation == 0x1: if section < len(self.columns): return self.columns[section] return", "def setData(self, index, data, role=None): row_idx = index.row() col_idx = index.column() if 0", "role=None): if role != 0: return QVariant() if orientation == 0x1: if section", "e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e))", "in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return str(row_idx + 1) + '.'", "else: ret = Qt.ItemIsEnabled return ret def data(self, index, role=None): if index.isValid(): col_idx", "3: if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2)", "row_idx + 1) + '.' 
elif col_idx == 1: if 0 <= row_idx", "def columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return", "2): if col_idx in (0, 1): word_col_idx = 1 else: word_col_idx = 3", "return QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx in (0, 2): return QtGui.QColor('lightgray')", "col_idx in (0, 2): return Qt.AlignRight elif role == Qt.FontRole: pass return QVariant()", "words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used here to display #", "= mnemonic_word_list self.words_count = 24 self.read_only = False self.columns = [ \"#\", 'Word',", "def flags(self, index): col_idx = index.column() if col_idx in (1, 3): ret =", "row_idx < int(self.words_count / 2): if col_idx == 1: idx = row_idx else:", "= [\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords:", "QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import", "coding: utf-8 -*- # Author: Bertrand256 # Created on: 2021-04 from typing import", "idx, word in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self):", "ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret def data(self, index, role=None):", "python3 # -*- coding: utf-8 -*- # Author: Bertrand256 # Created on: 2021-04", "parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list =", "QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int =", "self.layout_main = QtWidgets.QVBoxLayout(dlg) 
self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self)", "self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str =", "idx >= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e))", "2) + row_idx] elif role == Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count", "and row_idx < int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx]", "len(self.columns): return self.columns[section] return '' else: return ' ' def setData(self, index, data,", "class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words", "word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width = int((width", "Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def", "return self.columns[section] return '' else: return ' ' def setData(self, index, data, role=None):", "= self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used", "2): return self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0 <= row_idx < int(self.words_count", "parent=None, *args, **kwargs): 
return self.words_count / 2 def headerData(self, section, orientation, role=None): if", "width = self.viewMnemonic.width() width = int((width - (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model)", "== Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count / 2): if col_idx in", "False self.columns = [ \"#\", 'Word', '#', 'Word' ] def set_words_count(self, words_count): self.words_count", "import Optional, List from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant,", "/ 2): if col_idx in (0, 1): word_col_idx = 1 else: word_col_idx =", "clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard()", "< int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0 <=", "self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0)", "# shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce", "index.column() if col_idx in (1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not", "import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets", "index): col_idx = index.column() if col_idx in (1, 3): ret = Qt.ItemIsEnabled |", "data(self, index, role=None): if index.isValid(): col_idx = index.column() row_idx = index.row() if col_idx", "self.mnemonic_words: List[str] = [\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words,", "col_idx in (0, 2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx in", "self.error_msg(str(e)) @pyqtSlot(QPoint) def 
on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class", "= index.column() row_idx = index.row() if col_idx < len(self.columns): if role in (Qt.DisplayRole,", "display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words =", "elif col_idx == 2: return str(int(self.words_count / 2) + row_idx + 1) +", "== 0x1: if section < len(self.columns): return self.columns[section] return '' else: return '", "2): word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return if word and", "' 'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested)", "self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu self.popMenuWords = QMenu(self) # copy action self.actCopyWords", "setup_mnem_view) def set_words(self, words): for idx, word in enumerate(words): if idx < len(self.mnemonic_words):", "if not self.read_only: ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret def", "not self.read_only: ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret def data(self,", "self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used here to display # shortcut", "# not working on Mac (used here to display # shortcut in menu", "QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx in (0, 2): return QtGui.QColor('lightgray') elif", "set_words_count(self, words_count): self.words_count = words_count self.refresh_view() 
def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro):", "int = 24 self.mnemonic_words: List[str] = [\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model", "= Qt.ItemIsEnabled return ret def data(self, index, role=None): if index.isValid(): col_idx = index.column()", "= 1 else: word_col_idx = 3 if word_col_idx == 1: word = self.mnemonic_word_list[row_idx]", "'#', 'Word' ] def set_words_count(self, words_count): self.words_count = words_count self.refresh_view() def refresh_view(self): self.beginResetModel()", "index.row() if col_idx < len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if col_idx ==", "self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the", "Qt.TextAlignmentRole: if col_idx in (0, 2): return Qt.AlignRight elif role == Qt.FontRole: pass", "= QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\"))", "= self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx < int(self.words_count / 2): word", "Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24 self.mnemonic_words: List[str] = [\"\"] * 24", "__init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list", "index.row() col_idx = index.column() if 0 <= row_idx < int(self.words_count / 2): if", "self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the complete", "QTimer.singleShot(10, setup_mnem_view) def 
set_words(self, words): for idx, word in enumerate(words): if idx <", "< int(self.words_count / 2): if col_idx in (0, 1): word_col_idx = 1 else:", "if clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard =", "try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str)", "return self.words_count / 2 def headerData(self, section, orientation, role=None): if role != 0:", "str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ') ws = ws_str.split()", "self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can", "(1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable", "1 else: word_col_idx = 3 if word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif", "section < len(self.columns): return self.columns[section] return '' else: return ' ' def setData(self,", "== 2: return str(int(self.words_count / 2) + row_idx + 1) + '.' 
elif", "except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard:", "row_idx = index.row() if col_idx < len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if", "row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0", "Bertrand256 # Created on: 2021-04 from typing import Optional, List from PyQt5 import", "*args, **kwargs): return len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return self.words_count / 2", "parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24", "24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None", "2021-04 from typing import Optional, List from PyQt5 import QtCore, QtWidgets, QtGui from", "2): if col_idx == 1: idx = row_idx else: idx = row_idx +", "for idx, word in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def", "ret def data(self, index, role=None): if index.isValid(): col_idx = index.column() row_idx = index.row()", "'Word', '#', 'Word' ] def set_words_count(self, words_count): self.words_count = words_count self.refresh_view() def refresh_view(self):", "= [] for idx, w in enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w)", "in self.dictionary_words: return QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx in (0, 2):", "(2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10,", "Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) 
self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0,", "for idx, w in enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx] = w", "else: idx = row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] = data return True", "/ 2): if col_idx == 1: idx = row_idx else: idx = row_idx", "shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\")", "self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0 <= row_idx < int(self.words_count / 2):", "QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import", "[\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu]", "<= row_idx < int(self.words_count / 2): if col_idx in (0, 1): word_col_idx =", "\"#\", 'Word', '#', 'Word' ] def set_words_count(self, words_count): self.words_count = words_count self.refresh_view() def", "QtWidgets, QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui", "ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as e:", "def set_words(self, words): for idx, word in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx]", "self.word_count: int = 24 self.mnemonic_words: List[str] = [\"\"] * 24 self.mnemonic = Mnemonic('english')", "all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used here to display", "= 24 self.read_only = False self.columns = [ \"#\", 'Word', '#', 'Word' ]", "None self.word_count: int = 24 self.mnemonic_words: List[str] = [\"\"] * 24 
self.mnemonic =", "index.column() row_idx = index.row() if col_idx < len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole):", "set of seed words into this dialog directly (separated ' 'by spaces, commas", "breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu self.popMenuWords", "= word def get_cur_mnemonic_words(self): ws = [] for idx, w in enumerate(self.mnemonic_words): if", "if word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx", "int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return if", "ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable else: ret", "@pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel):", "self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only = ro def columnCount(self,", "== 3: if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count /", "if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws = [] for", "-*- coding: utf-8 -*- # Author: Bertrand256 # Created on: 2021-04 from typing", "- (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40)", "+ int(self.words_count / 2) self.mnemonic_word_list[idx] = data return True def flags(self, index): col_idx", "can copy and paste the complete set of seed 
words into this dialog", "self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy", "<= row_idx < int(self.words_count / 2): if col_idx == 1: idx = row_idx", "from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel from mnemonic import Mnemonic from", "width = int((width - (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1,", "get_cur_mnemonic_words(self): ws = [] for idx, w in enumerate(self.mnemonic_words): if idx >= self.word_count:", "= parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only =", "= row_idx else: idx = row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] = data", "def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None", "3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable else:", "of seed words into this dialog directly (separated ' 'by spaces, commas or", "line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu", "row_idx] else: return if word and word not in self.dictionary_words: return QtGui.QColor('red') elif", "-*- # Author: Bertrand256 # Created on: 2021-04 from typing import Optional, List", "break 
self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self,", "self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self)", "self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for idx, word in enumerate(words): if", "self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only = False self.columns = [ \"#\",", "def set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width", "index, role=None): if index.isValid(): col_idx = index.column() row_idx = index.row() if col_idx <", "mnemonic_word_list self.words_count = 24 self.read_only = False self.columns = [ \"#\", 'Word', '#',", "role=None): row_idx = index.row() col_idx = index.column() if 0 <= row_idx < int(self.words_count", "self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except", "6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu self.popMenuWords = QMenu(self) # copy action", "[] for idx, w in enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w) return", "def get_cur_mnemonic_words(self): ws = [] for idx, w in enumerate(self.mnemonic_words): if idx >=", "' ').replace(\",\", ' ') ws = ws_str.split() for idx, w in enumerate(ws): if", "try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, 
mnemonic_word_list,", "row_idx] elif role == Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count / 2):", "= QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)", "if orientation == 0x1: if section < len(self.columns): return self.columns[section] return '' else:", "or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context", "ret = Qt.ItemIsEnabled return ret def data(self, index, role=None): if index.isValid(): col_idx =", "idx >= self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words()", "import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None", "for idx, w in enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w) return ws", "on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self,", "'Word' ] def set_words_count(self, words_count): self.words_count = words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel()", "self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid 
context menu self.popMenuWords = QMenu(self)", "(separated ' 'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6)", "setup_mnem_view(): width = self.viewMnemonic.width() width = int((width - (2 * 40)) / 2)", "WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer:", "role in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return str(row_idx + 1) +", "0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic)", "0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx]", "if word and word not in self.dictionary_words: return QtGui.QColor('red') elif role == Qt.BackgroundRole:", "QShortcut, QApplication, QLabel from mnemonic import Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget):", "self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu self.popMenuWords =", "enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws = []", "elif role == Qt.BackgroundRole: if col_idx in 
(0, 2): return QtGui.QColor('lightgray') elif role", "not in self.dictionary_words: return QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx in (0,", "not working on Mac (used here to display # shortcut in menu item", "QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from", "columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return self.words_count", "self.read_only: ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret def data(self, index,", "row_idx < int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else:", "if role in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return str(row_idx + 1)", "40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for idx, word in enumerate(words): if idx", "Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used here to", "# Author: Bertrand256 # Created on: 2021-04 from typing import Optional, List from", "self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used here", "QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard", "point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent,", "if col_idx in (1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only:", "= word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width = int((width 
- (2", "import Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent)", "ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ') ws = ws_str.split() for", "self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg =", "def rowCount(self, parent=None, *args, **kwargs): return self.words_count / 2 def headerData(self, section, orientation,", "QLabel from mnemonic import Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self,", ">= self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str", "ro): self.read_only = ro def columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def rowCount(self,", "col_idx in (1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret", "int(self.words_count / 2): if col_idx == 1: idx = row_idx else: idx =", "|= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret def data(self, index, role=None): if", "def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3)", "row_idx else: idx = row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] = data return", "setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) 
self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\")", "= ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ') ws = ws_str.split() for idx,", "menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\"))", "working on Mac (used here to display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"),", "return '' else: return ' ' def setData(self, index, data, role=None): row_idx =", "return ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard =", "PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel from mnemonic import Mnemonic from wnd_utils", "* 24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] =", "Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count / 2): if col_idx in (0,", "to display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words", "< int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role ==", "(Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return str(row_idx + 1) + '.' 
elif", "<= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif", "from mnemonic import Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent):", "== Qt.TextAlignmentRole: if col_idx in (0, 2): return Qt.AlignRight elif role == Qt.FontRole:", "self.endResetModel() def set_read_only(self, ro): self.read_only = ro def columnCount(self, parent=None, *args, **kwargs): return", "QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count =", "self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the complete set of seed words", "= 3 if word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3", "None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24 self.mnemonic_words: List[str] = [\"\"]", "= data return True def flags(self, index): col_idx = index.column() if col_idx in", "2 def headerData(self, section, orientation, role=None): if role != 0: return QVariant() if", "if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx ==", "dialog directly (separated ' 'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height()", "').replace('\\r', ' ').replace(\",\", ' ') ws = ws_str.split() for idx, w in enumerate(ws):", "return ' ' def setData(self, index, data, role=None): row_idx = index.row() col_idx =", "return str(int(self.words_count / 2) + row_idx + 1) + '.' 
elif col_idx ==", "set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width =", "return if word and word not in self.dictionary_words: return QtGui.QColor('red') elif role ==", "on Mac (used here to display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered)", "if index.isValid(): col_idx = index.column() row_idx = index.row() if col_idx < len(self.columns): if", "def headerData(self, section, orientation, role=None): if role != 0: return QVariant() if orientation", "' def setData(self, index, data, role=None): row_idx = index.row() col_idx = index.column() if", "from typing import Optional, List from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore", "copy and paste the complete set of seed words into this dialog directly", "Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret def data(self, index, role=None): if index.isValid():", "col_idx == 1: idx = row_idx else: idx = row_idx + int(self.words_count /", "except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self,", "Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return ret", "self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False)", "import QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt", "ws.append(w) 
return ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard", "if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ') ws", "return ret def data(self, index, role=None): if index.isValid(): col_idx = index.column() row_idx =", "self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point):", "= QApplication.clipboard() if clipboard: ws_str = clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n',", "/ 2) + row_idx] else: return if word and word not in self.dictionary_words:", "<= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx == 3: if", "2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words):", "MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main", ">= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint)", "word_col_idx == 3 and row_idx < int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count /", "'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) #", "self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count 
self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width()", "0x1: if section < len(self.columns): return self.columns[section] return '' else: return ' '", "self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words):", "import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel from mnemonic import", "if col_idx in (0, 2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx", "None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0,", "0: return QVariant() if orientation == 0x1: if section < len(self.columns): return self.columns[section]", "len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return str(row_idx +", "PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel from mnemonic", "def setup_mnem_view(): width = self.viewMnemonic.width() width = int((width - (2 * 40)) /", "self.words_count = 24 self.read_only = False self.columns = [ \"#\", 'Word', '#', 'Word'", "self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for idx, word in", "section, orientation, role=None): if role != 0: return QVariant() if orientation == 0x1:", "< int(self.words_count / 2): if col_idx == 1: idx = row_idx else: idx", "= None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24 self.mnemonic_words: List[str] =", "= [ \"#\", 'Word', '#', 'Word' ] def set_words_count(self, words_count): self.words_count = words_count", "QWidget, QMenu, QShortcut, QApplication, QLabel 
from mnemonic import Mnemonic from wnd_utils import WndUtils", "True def flags(self, index): col_idx = index.column() if col_idx in (1, 3): ret", "self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx < int(self.words_count / 2): word =", "len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return self.words_count / 2 def headerData(self, section,", "+ '.' elif col_idx == 1: if 0 <= row_idx < int(self.words_count /", "word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return if word and word", "QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the complete set of seed", "Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as", "2) + row_idx] else: return if word and word not in self.dictionary_words: return", "= self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count", "Qt.BackgroundRole: if col_idx in (0, 2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if", "if section < len(self.columns): return self.columns[section] return '' else: return ' ' def", "self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for idx,", "0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx == 3:", "1) + '.' 
elif col_idx == 1: if 0 <= row_idx < int(self.words_count", "self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main')", "= QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try:", "words): for idx, word in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word", "role == Qt.TextAlignmentRole: if col_idx in (0, 2): return Qt.AlignRight elif role ==", "grid context menu self.popMenuWords = QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy", "Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel", "self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width = int((width - (2 * 40))", "# words grid context menu self.popMenuWords = QMenu(self) # copy action self.actCopyWords =", "set_read_only(self, ro): self.read_only = ro def columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def", "QVariant() if orientation == 0x1: if section < len(self.columns): return self.columns[section] return ''", "in (0, 2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx in (0,", "in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws =", "2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role == Qt.ForegroundRole: if 0", "def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def", "dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = 
QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic =", "ro def columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def rowCount(self, parent=None, *args, **kwargs):", "QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)", "self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width = int((width -", "').replace(\",\", ' ') ws = ws_str.split() for idx, w in enumerate(ws): if idx", "== 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx < int(self.words_count", "str(int(self.words_count / 2) + row_idx + 1) + '.' 
elif col_idx == 1:", "self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You", "ws_str.split() for idx, w in enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx] =", "= self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return if word and word not", "paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count):", "parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only = False", "row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx] = data return True def flags(self, index):", "col_idx == 1: if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx]", "and paste the complete set of seed words into this dialog directly (separated", "typing import Optional, List from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import", "class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem]", "0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = 
QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False)", "== 1: if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif", "self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg)", "def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if", "self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def", "word_col_idx = 3 if word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx ==", "elif role == Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count / 2): if", "enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as", "words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only = ro def", "paste the complete set of seed words into this dialog directly (separated '", "self.columns = [ \"#\", 'Word', '#', 'Word' ] def set_words_count(self, words_count): self.words_count =", "else: return if word and word not in self.dictionary_words: return QtGui.QColor('red') elif 
role", "headerData(self, section, orientation, role=None): if role != 0: return QVariant() if orientation ==", "e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent =", "< int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return", "self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard: ws_str = clipboard.text() if", "enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self): try: ws", "self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as", "Qt.EditRole): if col_idx == 0: return str(row_idx + 1) + '.' elif col_idx", "idx, w in enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w) return ws def", "if col_idx < len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0:", "as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception as e:", "in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered)", "return len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return self.words_count / 2 def headerData(self,", "col_idx = index.column() if 0 <= row_idx < int(self.words_count / 2): if col_idx", "index, data, role=None): row_idx = index.row() col_idx = index.column() if 0 <= row_idx", "str(row_idx + 1) + '.' 
elif col_idx == 2: return str(int(self.words_count / 2)", "40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for idx, word", "idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws = [] for idx,", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Author: Bertrand256 # Created on:", "(used here to display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste", "seed words into this dialog directly (separated ' 'by spaces, commas or line", "+ 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu self.popMenuWords = QMenu(self) # copy", "idx, w in enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view()", "= Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def", "parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int", "= None self.word_count: int = 24 self.mnemonic_words: List[str] = [\"\"] * 24 self.mnemonic", "MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words =", "menu self.popMenuWords = QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\")", "word def get_cur_mnemonic_words(self): ws = [] for idx, w in enumerate(self.mnemonic_words): if idx", "if col_idx == 1: idx = row_idx else: idx = row_idx + int(self.words_count", "/ 2) self.viewMnemonic.setModel(self.grid_model) 
self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self,", "] def set_words_count(self, words_count): self.words_count = words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def", "/ 2 def headerData(self, section, orientation, role=None): if role != 0: return QVariant()", "self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24 self.mnemonic_words: List[str] = [\"\"] *", "int(self.words_count / 2): if col_idx in (0, 1): word_col_idx = 1 else: word_col_idx", "ws = ws_str.split() for idx, w in enumerate(ws): if idx >= self.word_count: break", "if idx >= self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self): try: ws =", "'' else: return ' ' def setData(self, index, data, role=None): row_idx = index.row()", "rowCount(self, parent=None, *args, **kwargs): return self.words_count / 2 def headerData(self, section, orientation, role=None):", "= QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True)", "*args, **kwargs): return self.words_count / 2 def headerData(self, section, orientation, role=None): if role", "PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence from", "QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = 
self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered)", "words_count): self.words_count = words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only", "Mac (used here to display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) #", "QApplication, QLabel from mnemonic import Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def", "w in enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except", "# -*- coding: utf-8 -*- # Author: Bertrand256 # Created on: 2021-04 from", "def data(self, index, role=None): if index.isValid(): col_idx = index.column() row_idx = index.row() if", "+ row_idx + 1) + '.' 
elif col_idx == 1: if 0 <=", "if role != 0: return QVariant() if orientation == 0x1: if section <", "in (0, 1): word_col_idx = 1 else: word_col_idx = 3 if word_col_idx ==", "self.dictionary_words: return QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx in (0, 2): return", "= QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the complete set of", "# Created on: 2021-04 from typing import Optional, List from PyQt5 import QtCore,", "self.viewMnemonic.width() width = int((width - (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40)", "role != 0: return QVariant() if orientation == 0x1: if section < len(self.columns):", "self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\")", "= index.row() col_idx = index.column() if 0 <= row_idx < int(self.words_count / 2):", "try: clipboard = QApplication.clipboard() if clipboard: ws_str = clipboard.text() if isinstance(ws_str, str): ws_str", "col_idx = index.column() row_idx = index.row() if col_idx < len(self.columns): if role in", "**kwargs): return self.words_count / 2 def headerData(self, section, orientation, role=None): if role !=", "self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count)", 
"word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx <", "wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] =", "self.words_count / 2 def headerData(self, section, orientation, role=None): if role != 0: return", "width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for idx, word in enumerate(words):", "PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer,", "col_idx == 2: return str(int(self.words_count / 2) + row_idx + 1) + '.'", "def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only = ro def columnCount(self, parent=None,", "# copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not", "self.words_count = words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only =", "refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only = ro def columnCount(self, parent=None, *args,", "context menu self.popMenuWords = QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all", "if clipboard: ws_str = clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r',", "set_words(self, words): for idx, word in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] =", "ws_str = clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\",", "item QShortcut(QKeySequence(\"Ctrl+C\"), 
self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"),", "len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws = [] for idx, w in", "Mnemonic from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main:", "+ row_idx] else: return if word and word not in self.dictionary_words: return QtGui.QColor('red')", "(0, 2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx in (0, 2):", "from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel from", "into this dialog directly (separated ' 'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg)", "clipboard = QApplication.clipboard() if clipboard: ws_str = clipboard.text() if isinstance(ws_str, str): ws_str =", "= clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", '", "def set_words_count(self, words_count): self.words_count = words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self,", "| Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled return", "!= 0: return QVariant() if orientation == 0x1: if section < len(self.columns): return", "self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") 
self.msg.setText('You can copy and", "0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True)", "__init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count:", "data, role=None): row_idx = index.row() col_idx = index.column() if 0 <= row_idx <", "self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role == Qt.ForegroundRole: if 0 <= row_idx", "and word not in self.dictionary_words: return QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx", "col_idx == 0: return str(row_idx + 1) + '.' elif col_idx == 2:", "0 <= row_idx < int(self.words_count / 2): if col_idx == 1: idx =", "if 0 <= row_idx < int(self.words_count / 2): if col_idx == 1: idx", "words grid context menu self.popMenuWords = QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f", "= w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try:", "= int((width - (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width)", "return True def flags(self, index): col_idx = index.column() if col_idx in (1, 3):", "dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only = False self.columns = [", "+ '.' 
elif col_idx == 2: return str(int(self.words_count / 2) + row_idx +", "(0, 1): word_col_idx = 1 else: word_col_idx = 3 if word_col_idx == 1:", "< len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws = [] for idx, w", "word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width = self.viewMnemonic.width() width = int((width - (2 *", "== Qt.BackgroundRole: if col_idx in (0, 2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole:", "QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width =", "return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role == Qt.ForegroundRole: if 0 <=", "self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def set_words(self, words): for", "clipboard: ws_str = clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', '", "self.popMenuWords = QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered)", "word_col_idx = 1 else: word_col_idx = 3 if word_col_idx == 1: word =", "Optional, List from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant, QAbstractTableModel,", "index.column() if 0 <= row_idx < int(self.words_count / 2): if col_idx == 1:", "col_idx in (0, 1): word_col_idx = 1 else: word_col_idx = 3 if word_col_idx", "2) + row_idx + 1) + '.' 
elif col_idx == 1: if 0", "/ 2): return self.mnemonic_word_list[int(self.words_count / 2) + row_idx] elif role == Qt.ForegroundRole: if", "complete set of seed words into this dialog directly (separated ' 'by spaces,", "= '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e))", "role == Qt.BackgroundRole: if col_idx in (0, 2): return QtGui.QColor('lightgray') elif role ==", "self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view():", "= words_count self.refresh_view() def refresh_view(self): self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only = ro", "2): return QtGui.QColor('lightgray') elif role == Qt.TextAlignmentRole: if col_idx in (0, 2): return", "else: word_col_idx = 3 if word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx", "if 0 <= row_idx < int(self.words_count / 2): if col_idx in (0, 1):", "dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count", "QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) 
self.msg.setObjectName(\"msg\")", "/ 2): return self.mnemonic_word_list[row_idx] elif col_idx == 3: if 0 <= row_idx <", "0: return str(row_idx + 1) + '.' elif col_idx == 2: return str(int(self.words_count", "role == Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count / 2): if col_idx", "Created on: 2021-04 from typing import Optional, List from PyQt5 import QtCore, QtWidgets,", "Optional[QtWidgets.QVBoxLayout] = None self.spacer: Optional[QtWidgets.QSpacerItem] = None self.word_count: int = 24 self.mnemonic_words: List[str]", "directly (separated ' 'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() +", "= index.column() if 0 <= row_idx < int(self.words_count / 2): if col_idx ==", "words into this dialog directly (separated ' 'by spaces, commas or line breaks).')", "spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize( self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words", "ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ') ws = ws_str.split() for idx, w", "= MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\")", "3 and row_idx < int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count / 2) +", "') ws = ws_str.split() for idx, w in enumerate(ws): if idx >= self.word_count:", "= index.row() if col_idx < len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if col_idx", "flags(self, index): col_idx = index.column() if col_idx in (1, 3): ret = Qt.ItemIsEnabled", "orientation == 0x1: if section < len(self.columns): return self.columns[section] return '' else: 
return", "from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence", "break ws.append(w) return ws def on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws)", "self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only", "w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point))", "here to display # shortcut in menu item QShortcut(QKeySequence(\"Ctrl+C\"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered) # paste action", "# paste action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self,", "data return True def flags(self, index): col_idx = index.column() if col_idx in (1,", "'.' 
elif col_idx == 1: if 0 <= row_idx < int(self.words_count / 2):", "index.isValid(): col_idx = index.column() row_idx = index.row() if col_idx < len(self.columns): if role", "in (1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret |=", "if idx >= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as e:", "self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count =", "elif role == Qt.TextAlignmentRole: if col_idx in (0, 2): return Qt.AlignRight elif role", "40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view) def", "Qt.ItemIsEnabled return ret def data(self, index, role=None): if index.isValid(): col_idx = index.column() row_idx", "' ') ws = ws_str.split() for idx, w in enumerate(ws): if idx >=", "Exception as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent)", "parent=None, *args, **kwargs): return len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return self.words_count /", "' ' def setData(self, index, data, role=None): row_idx = index.row() col_idx = index.column()", "word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx < int(self.words_count / 2):", "self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) 
self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self)", "copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working", "in enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception", "row_idx = index.row() col_idx = index.column() if 0 <= row_idx < int(self.words_count /", "clipboard.text() if isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ')", "on: 2021-04 from typing import Optional, List from PyQt5 import QtCore, QtWidgets, QtGui", "parent) self.parent = parent self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24", "2) self.mnemonic_word_list[idx] = data return True def flags(self, index): col_idx = index.column() if", "elif col_idx == 1: if 0 <= row_idx < int(self.words_count / 2): return", "self.word_count: break self.mnemonic_words[idx] = w self.grid_model.refresh_view() except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def", "= dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only = False self.columns =", "' ').replace('\\r', ' ').replace(\",\", ' ') ws = ws_str.split() for idx, w in", "self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg):", "self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws = [] for idx, w in enumerate(self.mnemonic_words):", "Exception as e: self.error_msg(str(e)) def 
on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard: ws_str", "ws = [] for idx, w in enumerate(self.mnemonic_words): if idx >= self.word_count: break", "word in enumerate(words): if idx < len(self.mnemonic_words): self.mnemonic_words[idx] = word def get_cur_mnemonic_words(self): ws", "Author: Bertrand256 # Created on: 2021-04 from typing import Optional, List from PyQt5", "/ 2) self.mnemonic_word_list[idx] = data return True def flags(self, index): col_idx = index.column()", "except Exception as e: self.error_msg(str(e)) @pyqtSlot(QPoint) def on_viewMnemonic_customContextMenuRequested(self, point): try: self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point)) except Exception", "= self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard: clipboard.setText(ws_str) except Exception", "'.' elif col_idx == 2: return str(int(self.words_count / 2) + row_idx + 1)", "on_copy_seed_words_triggered(self): try: ws = self.get_cur_mnemonic_words() ws_str = '\\n'.join(ws) clipboard = QApplication.clipboard() if clipboard:", "+ row_idx] elif role == Qt.ForegroundRole: if 0 <= row_idx < int(self.words_count /", "as e: self.error_msg(str(e)) class MnemonicModel(QAbstractTableModel): def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent", "dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0, 0, 0) self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic", "self.viewMnemonic.verticalHeader().fontMetrics().height() + 6) self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested) # words grid context menu self.popMenuWords = QMenu(self) #", "List from PyQt5 import QtCore, QtWidgets, QtGui from PyQt5.QtCore import QVariant, 
QAbstractTableModel, pyqtSlot,", "else: return ' ' def setData(self, index, data, role=None): row_idx = index.row() col_idx", "[ \"#\", 'Word', '#', 'Word' ] def set_words_count(self, words_count): self.words_count = words_count self.refresh_view()", "QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget,", "1: word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and row_idx < int(self.words_count /", "= ws_str.split() for idx, w in enumerate(ws): if idx >= self.word_count: break self.mnemonic_words[idx]", "e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard: ws_str = clipboard.text()", "Qt.ItemIsEnabled | Qt.ItemIsSelectable if not self.read_only: ret |= Qt.ItemIsEditable else: ret = Qt.ItemIsEnabled", "/ 2): word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return if word", "clipboard.setText(ws_str) except Exception as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if", "= index.column() if col_idx in (1, 3): ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable if", "= ro def columnCount(self, parent=None, *args, **kwargs): return len(self.columns) def rowCount(self, parent=None, *args,", "row_idx < int(self.words_count / 2): if col_idx in (0, 1): word_col_idx = 1", "if col_idx in (0, 2): return Qt.AlignRight elif role == Qt.FontRole: pass return", "== 0: return str(row_idx + 1) + '.' 
elif col_idx == 2: return", "/ 2) + row_idx] elif role == Qt.ForegroundRole: if 0 <= row_idx <", "= self.viewMnemonic.width() width = int((width - (2 * 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0,", "the complete set of seed words into this dialog directly (separated ' 'by", "if col_idx in (0, 1): word_col_idx = 1 else: word_col_idx = 3 if", "isinstance(ws_str, str): ws_str = ws_str.replace('\\n', ' ').replace('\\r', ' ').replace(\",\", ' ') ws =", "word not in self.dictionary_words: return QtGui.QColor('red') elif role == Qt.BackgroundRole: if col_idx in", "24 self.mnemonic_words: List[str] = [\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self,", "QMenu(self) # copy action self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) #", "self.mnemonic_word_list[idx] = data return True def flags(self, index): col_idx = index.column() if col_idx", "this dialog directly (separated ' 'by spaces, commas or line breaks).') self.layout_main.addWidget(self.msg) self.viewMnemonic.verticalHeader().setDefaultSectionSize(", "action self.act_paste_words = self.popMenuWords.addAction(\"\\u23ce Paste\") self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered) self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count", "self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste", "word and word not in self.dictionary_words: return QtGui.QColor('red') 
elif role == Qt.BackgroundRole: if", "self.layout_main.setSpacing(3) self.layout_main.setObjectName(\"verticalLayout\") self.viewMnemonic = QtWidgets.QTableView(self) self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.viewMnemonic.setObjectName(\"viewMnemonic\") self.viewMnemonic.horizontalHeader().setVisible(False) self.viewMnemonic.horizontalHeader().setStretchLastSection(True) self.viewMnemonic.verticalHeader().setVisible(False) self.layout_main.addWidget(self.viewMnemonic) self.msg", "self.act_paste_words.setShortcut(QKeySequence(\"Ctrl+V\")) QShortcut(QKeySequence(\"Ctrl+V\"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered) def set_word_count(self, word_count): self.word_count = word_count self.grid_model.set_words_count(word_count) def setup_mnem_view(): width", "self.actCopyWords = self.popMenuWords.addAction(\"\\u274f Copy all words\") self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered) self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac", "24 self.read_only = False self.columns = [ \"#\", 'Word', '#', 'Word' ] def", "as e: self.error_msg(str(e)) def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard: ws_str =", "/ 2) + row_idx + 1) + '.' elif col_idx == 1: if", "if col_idx == 0: return str(row_idx + 1) + '.' 
elif col_idx ==", "self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main =", "self.msg.setText('You can copy and paste the complete set of seed words into this", "self.dictionary_words = dictionary_words self.mnemonic_word_list = mnemonic_word_list self.words_count = 24 self.read_only = False self.columns", "in enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self): try:", "self.beginResetModel() self.endResetModel() def set_read_only(self, ro): self.read_only = ro def columnCount(self, parent=None, *args, **kwargs):", "< len(self.columns): return self.columns[section] return '' else: return ' ' def setData(self, index,", "= None self.setupUi(self) def setupUi(self, dlg): dlg.setObjectName(\"SeedWordsWdg\") self.layout_main = QtWidgets.QVBoxLayout(dlg) self.layout_main.setObjectName('layout_main') self.layout_main.setContentsMargins(0, 0,", "setData(self, index, data, role=None): row_idx = index.row() col_idx = index.column() if 0 <=", "def on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard: ws_str = clipboard.text() if isinstance(ws_str,", "Mnemonic('english') self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist) self.popMenuWords: Optional[QMenu] = None self.setupUi(self) def setupUi(self,", "* 40)) / 2) self.viewMnemonic.setModel(self.grid_model) self.viewMnemonic.setColumnWidth(0, 40) self.viewMnemonic.setColumnWidth(1, width) self.viewMnemonic.setColumnWidth(2, 40) QTimer.singleShot(10, setup_mnem_view)", "self.read_only = False self.columns = [ \"#\", 'Word', '#', 'Word' ] def set_words_count(self,", "utf-8 -*- # Author: Bertrand256 # Created on: 2021-04 from typing import Optional,", "List[str] = [\"\"] * 24 self.mnemonic = Mnemonic('english') self.grid_model = MnemonicModel(self, 
self.mnemonic_words, self.mnemonic.wordlist)", "**kwargs): return len(self.columns) def rowCount(self, parent=None, *args, **kwargs): return self.words_count / 2 def", "= False self.columns = [ \"#\", 'Word', '#', 'Word' ] def set_words_count(self, words_count):", "1: idx = row_idx else: idx = row_idx + int(self.words_count / 2) self.mnemonic_word_list[idx]", "from wnd_utils import WndUtils class SeedWordsWdg(QWidget): def __init__(self, parent): QWidget.__init__(self, parent=parent) self.layout_main: Optional[QtWidgets.QVBoxLayout]", "elif word_col_idx == 3 and row_idx < int(self.words_count / 2): word = self.mnemonic_word_list[int(self.words_count", "+ 1) + '.' elif col_idx == 1: if 0 <= row_idx <", "self.msg = QtWidgets.QLabel(self) self.msg.setWordWrap(True) self.msg.setObjectName(\"msg\") self.msg.setText('You can copy and paste the complete set", "self.mnemonic_word_list[int(self.words_count / 2) + row_idx] else: return if word and word not in", "< len(self.columns): if role in (Qt.DisplayRole, Qt.EditRole): if col_idx == 0: return str(row_idx", "pyqtSlot, QPoint, QTimer, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QWidget, QMenu,", "int(self.words_count / 2) self.mnemonic_word_list[idx] = data return True def flags(self, index): col_idx =", "orientation, role=None): if role != 0: return QVariant() if orientation == 0x1: if", "1) + '.' elif col_idx == 2: return str(int(self.words_count / 2) + row_idx", "self.actCopyWords.setShortcut(QKeySequence(\"Ctrl+C\")) # not working on Mac (used here to display # shortcut in", "2: return str(int(self.words_count / 2) + row_idx + 1) + '.' 
elif col_idx", "def __init__(self, parent, mnemonic_word_list, dictionary_words): QAbstractTableModel.__init__(self, parent) self.parent = parent self.dictionary_words = dictionary_words", "1: if 0 <= row_idx < int(self.words_count / 2): return self.mnemonic_word_list[row_idx] elif col_idx", "3 if word_col_idx == 1: word = self.mnemonic_word_list[row_idx] elif word_col_idx == 3 and", "on_paste_seed_words_triggered(self): try: clipboard = QApplication.clipboard() if clipboard: ws_str = clipboard.text() if isinstance(ws_str, str):", "QKeySequence from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel from mnemonic import Mnemonic", "0 <= row_idx < int(self.words_count / 2): if col_idx in (0, 1): word_col_idx", "w in enumerate(self.mnemonic_words): if idx >= self.word_count: break ws.append(w) return ws def on_copy_seed_words_triggered(self):" ]
[ "class Promise: \"\"\" Base class for the proxy class created in the closure", "closure of the lazy function. It's used to recognize promises in code. \"\"\"", "class created in the closure of the lazy function. It's used to recognize", "of the lazy function. It's used to recognize promises in code. \"\"\" pass", "in the closure of the lazy function. It's used to recognize promises in", "\"\"\" Base class for the proxy class created in the closure of the", "Base class for the proxy class created in the closure of the lazy", "the closure of the lazy function. It's used to recognize promises in code.", "class for the proxy class created in the closure of the lazy function.", "for the proxy class created in the closure of the lazy function. It's", "Promise: \"\"\" Base class for the proxy class created in the closure of", "proxy class created in the closure of the lazy function. It's used to", "created in the closure of the lazy function. It's used to recognize promises", "the proxy class created in the closure of the lazy function. It's used" ]
[ "of the block y: Optional[int] # Block type, such as \"variable_set\" type: str", "\"variable_set\" type: str fields: Dict[str, BlockField] values: Dict[str, BlockValue] # The next block", "# Statements such as HANDLER for event handlers statements: Dict[str, \"Block\"] def findTail(self)", "# Location of the block y: Optional[int] # Block type, such as \"variable_set\"", "str fields: Dict[str, BlockField] values: Dict[str, BlockValue] # The next block to process", "block of the chain, which could be this block.\"\"\" return self if self.next", "type, such as \"variable_set\" type: str fields: Dict[str, BlockField] values: Dict[str, BlockValue] #", "such as \"variable_set\" type: str fields: Dict[str, BlockField] values: Dict[str, BlockValue] # The", "str id: str name: str @dataclass class BlockField(): name: str id: Optional[str] variable_type:", "name: str shadow: BlockShadow # TODO: Needs more test cases to verify implementation", "Location of the block x: Optional[int] # Location of the block y: Optional[int]", "BlockShadow(): type: str fields: Dict[str, BlockField] @dataclass class BlockValue(): name: str shadow: BlockShadow", "str shadow: BlockShadow # TODO: Needs more test cases to verify implementation @dataclass", "to verify implementation @dataclass class BlockMutation(): expanded: int input_init: bool @dataclass class Block():", "BlockField(): name: str id: Optional[str] variable_type: Optional[str] value: str @dataclass class BlockShadow(): type:", "type: str fields: Dict[str, BlockField] values: Dict[str, BlockValue] # The next block to", "class Block(): # ID id: int # Location of the block x: Optional[int]", "dataclasses import dataclass @dataclass class BlockVariableDefinition(): type: str id: str name: str @dataclass", "block to process next: Optional[\"Block\"] disabled: bool # Statements such as HANDLER for", "bool @dataclass class Block(): # ID id: int # Location of the block", "type: str id: str name: str @dataclass class BlockField(): name: 
str id: Optional[str]", "shadow: BlockShadow # TODO: Needs more test cases to verify implementation @dataclass class", "block x: Optional[int] # Location of the block y: Optional[int] # Block type,", "int input_init: bool @dataclass class Block(): # ID id: int # Location of", "expanded: int input_init: bool @dataclass class Block(): # ID id: int # Location", "id: int # Location of the block x: Optional[int] # Location of the", "statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the last block of the", "Optional, Dict from dataclasses import dataclass @dataclass class BlockVariableDefinition(): type: str id: str", "of the chain, which could be this block.\"\"\" return self if self.next is", "test cases to verify implementation @dataclass class BlockMutation(): expanded: int input_init: bool @dataclass", "Optional[int] # Location of the block y: Optional[int] # Block type, such as", "input_init: bool @dataclass class Block(): # ID id: int # Location of the", "value: str @dataclass class BlockShadow(): type: str fields: Dict[str, BlockField] @dataclass class BlockValue():", "values: Dict[str, BlockValue] # The next block to process next: Optional[\"Block\"] disabled: bool", "int # Location of the block x: Optional[int] # Location of the block", "# The next block to process next: Optional[\"Block\"] disabled: bool # Statements such", "disabled: bool # Statements such as HANDLER for event handlers statements: Dict[str, \"Block\"]", "as \"variable_set\" type: str fields: Dict[str, BlockField] values: Dict[str, BlockValue] # The next", "@dataclass class BlockShadow(): type: str fields: Dict[str, BlockField] @dataclass class BlockValue(): name: str", "\"\"\"Find the last block of the chain, which could be this block.\"\"\" return", "Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the last block of the chain,", "ID id: int # Location of the block x: Optional[int] # Location of", "str @dataclass class BlockShadow(): type: str 
fields: Dict[str, BlockField] @dataclass class BlockValue(): name:", "def findTail(self) -> \"Block\": \"\"\"Find the last block of the chain, which could", "from typing import TypedDict, Optional, Dict from dataclasses import dataclass @dataclass class BlockVariableDefinition():", "next: Optional[\"Block\"] disabled: bool # Statements such as HANDLER for event handlers statements:", "BlockShadow # TODO: Needs more test cases to verify implementation @dataclass class BlockMutation():", "class BlockValue(): name: str shadow: BlockShadow # TODO: Needs more test cases to", "Location of the block y: Optional[int] # Block type, such as \"variable_set\" type:", "# TODO: Needs more test cases to verify implementation @dataclass class BlockMutation(): expanded:", "BlockField] @dataclass class BlockValue(): name: str shadow: BlockShadow # TODO: Needs more test", "dataclass @dataclass class BlockVariableDefinition(): type: str id: str name: str @dataclass class BlockField():", "-> \"Block\": \"\"\"Find the last block of the chain, which could be this", "BlockVariableDefinition(): type: str id: str name: str @dataclass class BlockField(): name: str id:", "class BlockMutation(): expanded: int input_init: bool @dataclass class Block(): # ID id: int", "str id: Optional[str] variable_type: Optional[str] value: str @dataclass class BlockShadow(): type: str fields:", "the block x: Optional[int] # Location of the block y: Optional[int] # Block", "Dict[str, BlockField] values: Dict[str, BlockValue] # The next block to process next: Optional[\"Block\"]", "import dataclass @dataclass class BlockVariableDefinition(): type: str id: str name: str @dataclass class", "The next block to process next: Optional[\"Block\"] disabled: bool # Statements such as", "bool # Statements such as HANDLER for event handlers statements: Dict[str, \"Block\"] def", "# Block type, such as \"variable_set\" type: str fields: Dict[str, BlockField] values: Dict[str,", "class BlockVariableDefinition(): type: str 
id: str name: str @dataclass class BlockField(): name: str", "typing import TypedDict, Optional, Dict from dataclasses import dataclass @dataclass class BlockVariableDefinition(): type:", "HANDLER for event handlers statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the", "handlers statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the last block of", "Optional[str] variable_type: Optional[str] value: str @dataclass class BlockShadow(): type: str fields: Dict[str, BlockField]", "@dataclass class BlockField(): name: str id: Optional[str] variable_type: Optional[str] value: str @dataclass class", "process next: Optional[\"Block\"] disabled: bool # Statements such as HANDLER for event handlers", "Dict[str, BlockField] @dataclass class BlockValue(): name: str shadow: BlockShadow # TODO: Needs more", "Optional[\"Block\"] disabled: bool # Statements such as HANDLER for event handlers statements: Dict[str,", "id: Optional[str] variable_type: Optional[str] value: str @dataclass class BlockShadow(): type: str fields: Dict[str,", "the chain, which could be this block.\"\"\" return self if self.next is None", "the last block of the chain, which could be this block.\"\"\" return self", "BlockMutation(): expanded: int input_init: bool @dataclass class Block(): # ID id: int #", "Block type, such as \"variable_set\" type: str fields: Dict[str, BlockField] values: Dict[str, BlockValue]", "the block y: Optional[int] # Block type, such as \"variable_set\" type: str fields:", "chain, which could be this block.\"\"\" return self if self.next is None else", "x: Optional[int] # Location of the block y: Optional[int] # Block type, such", "\"Block\": \"\"\"Find the last block of the chain, which could be this block.\"\"\"", "str name: str @dataclass class BlockField(): name: str id: Optional[str] variable_type: Optional[str] value:", "type: str fields: Dict[str, BlockField] @dataclass class BlockValue(): name: str shadow: BlockShadow #", 
"BlockField] values: Dict[str, BlockValue] # The next block to process next: Optional[\"Block\"] disabled:", "@dataclass class BlockVariableDefinition(): type: str id: str name: str @dataclass class BlockField(): name:", "findTail(self) -> \"Block\": \"\"\"Find the last block of the chain, which could be", "Statements such as HANDLER for event handlers statements: Dict[str, \"Block\"] def findTail(self) ->", "Optional[str] value: str @dataclass class BlockShadow(): type: str fields: Dict[str, BlockField] @dataclass class", "from dataclasses import dataclass @dataclass class BlockVariableDefinition(): type: str id: str name: str", "such as HANDLER for event handlers statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\":", "to process next: Optional[\"Block\"] disabled: bool # Statements such as HANDLER for event", "str @dataclass class BlockField(): name: str id: Optional[str] variable_type: Optional[str] value: str @dataclass", "cases to verify implementation @dataclass class BlockMutation(): expanded: int input_init: bool @dataclass class", "Block(): # ID id: int # Location of the block x: Optional[int] #", "import TypedDict, Optional, Dict from dataclasses import dataclass @dataclass class BlockVariableDefinition(): type: str", "# Location of the block x: Optional[int] # Location of the block y:", "event handlers statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the last block", "block y: Optional[int] # Block type, such as \"variable_set\" type: str fields: Dict[str,", "BlockValue(): name: str shadow: BlockShadow # TODO: Needs more test cases to verify", "as HANDLER for event handlers statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find", "@dataclass class BlockMutation(): expanded: int input_init: bool @dataclass class Block(): # ID id:", "implementation @dataclass class BlockMutation(): expanded: int input_init: bool @dataclass class Block(): # ID", "# ID id: int # Location of the block x: 
Optional[int] # Location", "str fields: Dict[str, BlockField] @dataclass class BlockValue(): name: str shadow: BlockShadow # TODO:", "@dataclass class BlockValue(): name: str shadow: BlockShadow # TODO: Needs more test cases", "id: str name: str @dataclass class BlockField(): name: str id: Optional[str] variable_type: Optional[str]", "fields: Dict[str, BlockField] @dataclass class BlockValue(): name: str shadow: BlockShadow # TODO: Needs", "Dict[str, BlockValue] # The next block to process next: Optional[\"Block\"] disabled: bool #", "fields: Dict[str, BlockField] values: Dict[str, BlockValue] # The next block to process next:", "BlockValue] # The next block to process next: Optional[\"Block\"] disabled: bool # Statements", "@dataclass class Block(): # ID id: int # Location of the block x:", "\"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the last block of the chain, which", "Dict from dataclasses import dataclass @dataclass class BlockVariableDefinition(): type: str id: str name:", "TypedDict, Optional, Dict from dataclasses import dataclass @dataclass class BlockVariableDefinition(): type: str id:", "verify implementation @dataclass class BlockMutation(): expanded: int input_init: bool @dataclass class Block(): #", "of the block x: Optional[int] # Location of the block y: Optional[int] #", "last block of the chain, which could be this block.\"\"\" return self if", "more test cases to verify implementation @dataclass class BlockMutation(): expanded: int input_init: bool", "name: str id: Optional[str] variable_type: Optional[str] value: str @dataclass class BlockShadow(): type: str", "y: Optional[int] # Block type, such as \"variable_set\" type: str fields: Dict[str, BlockField]", "name: str @dataclass class BlockField(): name: str id: Optional[str] variable_type: Optional[str] value: str", "class BlockField(): name: str id: Optional[str] variable_type: Optional[str] value: str @dataclass class BlockShadow():", "TODO: Needs more test cases to verify 
implementation @dataclass class BlockMutation(): expanded: int", "Optional[int] # Block type, such as \"variable_set\" type: str fields: Dict[str, BlockField] values:", "for event handlers statements: Dict[str, \"Block\"] def findTail(self) -> \"Block\": \"\"\"Find the last", "variable_type: Optional[str] value: str @dataclass class BlockShadow(): type: str fields: Dict[str, BlockField] @dataclass", "which could be this block.\"\"\" return self if self.next is None else self.next.findTail()", "Needs more test cases to verify implementation @dataclass class BlockMutation(): expanded: int input_init:", "class BlockShadow(): type: str fields: Dict[str, BlockField] @dataclass class BlockValue(): name: str shadow:", "next block to process next: Optional[\"Block\"] disabled: bool # Statements such as HANDLER" ]
[ "count_dict = {} for key, val in match_dict.items(): active = np.isin(unique, val) count_dict[key]", "new_dh #period_label_bool = counts * period > string_col.size - period # now get", "# find a common name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1]))", "0: stringified = sorted([str(el) for el in difference]) name = \" \".join(stringified) name", "else: new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict()", "meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict", "if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col = col.astype(str) unique, counts", "from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import", "counts = np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size if ratio < pp.MAX_RATIO_LARGEST_CAT", "str(i) if ds.name != ds.orig_sheet_name: name = ds.name + \" \" + name", "periodic potential string_col = col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio = np.max(counts)", "dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name in", "connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import", "change slightly (not checked for now)(should be checked in new if statment) #", "in match_dict: cond = np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data =", "4: new_ds = DataStruct(sub_df_data, 
sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not", "ds in dh] meta_ids = [ds.id for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0]", "tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True for name in dh.data_dict:", ":]) min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] =", "DataHolder(dh.name) word_set_list = list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh !=", "= meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh):", "in orig_dh} fully_represented = list() for key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name", "pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j in range(i + 1, len(info_array)): distances[i,", "meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find the category", "= name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name != ds.orig_sheet_name: name = ds.name", "entries, group them and make collective name @staticmethod def component_finder(uniques): n_el = len(uniques)", "in deviating_entries: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data", "of data_structs to make headers_dict = {} for key, val in count_dict.items(): if", "the new_dh for name in match_dict: cond = np.array([string_col.values == sub_name for sub_name", "for key, val in match_dict.items(): active = np.isin(unique, val) count_dict[key] = np.sum(counts[active]) #", "exist for all sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented)", "match_dict = 
SubTriangler.component_finder(unique) # now load the new_dh for name in match_dict: cond", "df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if", "def generate_tr_spatial_info(dh): outer_dict = dict() for ds in dh: tr_spatial_dict = dict() num_cols", "len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh =", "fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list", "== 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form ==", "meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array(", "missing = np.array([\"Missing header\" in header for header in i_headers]) if np.any(missing) and", "tri_type = \"single\" if \"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs = 1", "for name in fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in", "out_headers: out_headers[key][i] = header for key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index !=", "np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name +", "= DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name in d_cols: col", "new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values():", "in dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh] square_index =", "in ds.df_data.index[repeated]: for col in deviating_entries: if ds.df_data.loc[ind, 
col] != \"\" and ds.df_profiles.loc[ind,", "fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0]", "key, val in match_dict.items(): for item in val: rev_match_dict[item] = key count_dict =", "dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh):", "sorted([str(el) for el in difference]) name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else:", "== \"aggregate\": # search for the most square ones! for name in fully_represented:", "for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0:", "meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] #", "col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id]", "triangle type: \" + tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict,", "meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if check == 'Combined':", "raise ValueError(\"Unknown triangle type: \" + tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod", "if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference)", "i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] = header for key, val in headers_dict.items():", "= [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh] square_index = np.minimum(square_ind1, 
square_ind2).mean() deviations.append(1-square_index)", "if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind,", "np.zeros(shape=(n_el,n_el)) for i in range(n_el): for j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None,", "(used) # some entries may change slightly (used) # period may change slightly", "if n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: #", "< pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for", "dh in dh_dict.items(): sheets = {ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets)) ==", "= dict() for ds in dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles)", "ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) > 0: stringified = sorted([str(el)", "difflib import SequenceMatcher from string import digits from copy import deepcopy import numpy", "min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median", "= dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list:", "if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] =", "import connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions", "kwargs['n_outputs'] new_dh_dict = dict() # This call will reset all entries in new_dh_dict", "new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for 
key, val in match_dict.items(): for", "\" - \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh):", "fully_represented = list() for key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name for ds", "= [ds.id for ds in dh] meta_ids = [ds.id for ds in meta_dh.data_struct_list]", "item in val: rev_match_dict[item] = key count_dict = {} for key, val in", "pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name)", "DataStruct class SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert", "dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols =", "in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] =", "str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in", "ds in dh] # unique, counts = np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts", "sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category rows #", "ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array =", "= [ds.orig_sheet_name for ds in dh] # unique, counts = np.unique(occurence_list, return_counts=True) #", "copy import deepcopy import numpy as np import pandas as pd from 
scipy.sparse.csgraph._traversal", "col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size if ratio", "j in range(i + 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j,", "pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict):", "/ ds.df_data.shape[0] for ds in dh] square_index = np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type", "= [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds", "name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not", "for col in deviating_entries: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] ==", "orig_name = ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING)", "\"single\" if \"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs = 1 if \"n_outputs\"", "for index in closest_dists.index} for word_set, ds in zip(word_set_list, dh): for meta_id in", "key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index", "# check = meta_ds.df_data.iloc[0,0] # if check == 'Combined': # meta_col_int_array = np.array(", "in new_dh_dict new_dh_dict[dh.name] = dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for", "dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod", "= {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in closest_dists.index} for word_set, ds", "unique, counts = 
np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique)", "\"\" or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split", "1 if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict() # This", "out_headers[key][i] = header for key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind,", "1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False", "find the category column # Should be strings (for now) (used) # Kind", "{ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented, len(orig_sheets)", "meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist", "ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if check", "digits from copy import deepcopy import numpy as np import pandas as pd", "= list() if tri_type == \"aggregate\": # search for the most square ones!", "n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return fully_represented[0] # check size", "same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name +", "period # now get the remaining #sub_period_label = unique[period_label_bool == False] match_dict =", "index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind", "meta_dh.add_sheet(ds.name, temp_data, temp_profile, 
orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles =", "for key in out_headers: out_headers[key][i] = header for key, val in headers_dict.items(): df_data", "{ds.orig_sheet_name for ds in orig_dh} fully_represented = list() for key, dh in dh_dict.items():", "generate_tr_spatial_info(dh): outer_dict = dict() for ds in dh: tr_spatial_dict = dict() num_cols =", "list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if meta_dh.n", "and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) > 0: stringified =", "ds.df_data[col_name] if len(date_form) == 0: date_form = DateColIdentifier.date_form(col) == 1 else: date_form =", "dict() for ds in dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers", "(thus repetitive entries) (used) # some entries may change slightly (used) # period", "import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('', '',", "True for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values()", "@staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for ds in dh: tr_spatial_dict = dict()", "match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for key, val in match_dict.items(): for item", "{} for key, val in match_dict.items(): active = np.isin(unique, val) count_dict[key] = np.sum(counts[active])", "ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio", "name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name != ds.orig_sheet_name: name = ds.name +", 
"if meta_dh != None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find", "in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh", "el in outer_dict.values() if el['name'] == name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE", "for i in range(n_components): comp = np.array(uniques)[labels == i] if len(comp) == 1:", "assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in", "for ds in orig_dh} fully_represented = list() for key, dh in dh_dict.items(): sheets", "change slightly (used) # period may change slightly (not checked for now)(should be", "= \"Row \" + str(ind) if row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else:", "[str(el) for el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for", "i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) > 0: stringified", "np.array(uniques)[labels == i] if len(comp) == 1: match_dict[comp[0]] = comp else: # find", "tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds in dh: d_cols", "in dh] square_index = np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": # get", "= DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for key,", "ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data = pd.DataFrame(df_data.values,", "= np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size if ratio < 0.5 and", "return fully_represented[0] # check size coherence deviations = list() if tri_type == 
\"aggregate\":", "[ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in", "= connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict = dict() for i in", "key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets))", "= np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and", "0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most unique name for i in", "= difference.difference(word_set_list[j]) if len(difference) > 0: stringified = sorted([str(el) for el in difference])", "meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in", "will reset all entries in new_dh_dict new_dh_dict[dh.name] = dh for dh in new_dh_dict.values():", "for j in range(len(word_set_list)): if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference", "#ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated headers", "period > string_col.size - period # now get the remaining #sub_period_label = unique[period_label_bool", "d_cols = d_cols[d_cols].index date_form = [] for col_name in d_cols: col = ds.df_data[col_name]", "make headers_dict = {} for key, val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED:", "#ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated headers repeated, deviating_entries =", "date_form == False for ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind,", "dh with the most coherent sizes for name in fully_represented: dh = dh_dict[name]", "deviating_entries: if ds.df_data.loc[ind, col] != \"\" and 
ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data =", "for ds in dh] square_index = np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\":", "= meta_ds.df_data.iloc[0,0] # if check == 'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING])", "call will reset all entries in new_dh_dict new_dh_dict[dh.name] = dh for dh in", "python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as", "def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form", "matches match_dict = dict() for i in range(n_components): comp = np.array(uniques)[labels == i]", "n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list", "= DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form =", "col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh", "np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio", "checked for now) df_data = ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for", "> 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median -", "a reasonable measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if", "(used) # period may change slightly (not checked for now)(should be checked in", "for meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals", "dh] square_index = 
np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": # get the", "ratio = np.max(counts) / col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT", "dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size", "in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if el['name'] ==", "ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in vals]", "date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False for ind", "= ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col in df_data.iteritems():", "new_dh = DataHolder(dh.name) word_set_list = list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if", "in val: rev_match_dict[item] = key count_dict = {} for key, val in match_dict.items():", "fill the dh # First, if same length, find optimal header naming same_length", "len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist = np.min(distances) if", "deviations.append(1-square_index) elif tri_type == \"single\": # get the dh with the most coherent", "for ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\"", "ind, val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index,", "j in range(len(word_set_list)): if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference =", "header\" in header for header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header =", 
"temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form])", "meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) #", "unique, counts = np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size if ratio <", "= comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data =", "in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0]", "tri_type = kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs']", "for ds in dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh]", "ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev # if", "common name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a]", "strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind", "+ \" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod", "pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else:", "unique name for i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for", "# check periodic potential string_col = col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio", "new_dh_dict, 
meta_dh): # find potential category rows # for now, look for strings", "map each meta data to the triangle closest under it tr_ids = [ds.id", "( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \" - \" + name,", "1: return fully_represented[0] # check size coherence deviations = list() if tri_type ==", "tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"]", "match_dict: cond = np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name])", "sheets = {ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return", "deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col in deviating_entries: if", "j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) >", "< 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if", "under it tr_ids = [ds.id for ds in dh] meta_ids = [ds.id for", "num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array)", "df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries, group them and make collective name", "name = str(i) if ds.name != ds.orig_sheet_name: name = ds.name + \" \"", "= header for key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val]", "return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): 
SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh)", "closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in vals] word_set.update(word_list) @staticmethod", "for key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name for ds in dh} if", "if len(comp) == 1: match_dict[comp[0]] = comp else: # find a common name", "new if statment) # Should get tag matches in dict (not checked for", "from string import digits from copy import deepcopy import numpy as np import", "if same_length: out_headers = deepcopy(headers_dict) for i in range(len_array[0]): i_headers = np.array([val[i] for", "in dh_dict.items(): sheets = {ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets)) == 0:", "# print('found') if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING])", "== 0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh =", "ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col],", "df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" + key, df_data,", "len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh", "difference]) name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name = str(i) if", "ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\" and", "dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev # if n_outputs 
==", "#check if median is a reasonable measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for", "make collective name @staticmethod def component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for", "ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col]", "same_length = np.std(len_array) == 0 if same_length: out_headers = deepcopy(headers_dict) for i in", "d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name in d_cols:", "range(len(info_array)): for j in range(i + 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :]", "get number of data_structs to make headers_dict = {} for key, val in", "in new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict", "new_dh_dict = dict() # This call will reset all entries in new_dh_dict new_dh_dict[dh.name]", "spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat", "= DataHolder(dh.name) word_set_list = list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh", "i in range(len(info_array)): for j in range(i + 1, len(info_array)): distances[i, j] =", "= word_set_list[i].copy() for j in range(len(word_set_list)): if j != i and ds.orig_sheet_name ==", "i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for j in range(len(word_set_list)):", "orig_name) if len(fully_represented) == 1: return fully_represented[0] # check size coherence deviations =", "headers_dict = {} for key, val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key]", "tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 
2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) /", "> pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int) for enum, key in", "new_dh_dict new_dh_dict[dh.name] = dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds", "SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name,", "> meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if check ==", "dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM)", "name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data", "name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if el['name']", "ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if meta_dh.n > 0:", "np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True for name in", "checked in new if statment) # Should get tag matches in dict (not", "DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING)", "split in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data ==", "= dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 = [ds.df_data.shape[1] /", "= 
\" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name !=", "# unique, counts = np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) /", "comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data", "else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False ds.df_data", "dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make", "i_headers = np.array([val[i] for val in headers_dict.values()]) missing = np.array([\"Missing header\" in header", "occurence_list = [ds.orig_sheet_name for ds in dh] # unique, counts = np.unique(occurence_list, return_counts=True)", "orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated", "category column # Should be strings (for now) (used) # Kind of periodic", "- pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \" + tri_type)", "SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find the category column", "# First, if same length, find optimal header naming same_length = np.std(len_array) ==", "> pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col = col.astype(str) unique, counts = np.unique(string_col,", "np.std(len_array) == 0 if same_length: out_headers = deepcopy(headers_dict) for i in range(len_array[0]): i_headers", "== SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = 
pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col])", "name @staticmethod def component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in", "= np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev # if n_outputs == 1: #", "for ds in dh: if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high =", "rev_match_dict = dict() for key, val in match_dict.items(): for item in val: rev_match_dict[item]", "of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index) > 0:", "np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median", "ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values ==", "< pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median return outer_dict @staticmethod def divide_into_subtriangles(ds,", "for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name ==", "identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form =", "tri_type == \"single\": # get the dh with the most coherent sizes for", "= ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles", "new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh", "[ds.id for ds in dh] meta_ids = [ds.id for ds in meta_dh.data_struct_list] #content", "[int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in 
meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high: tr_col_int_array =", "sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category rows", "not_date_form = date_form == False for ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns:", "headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill the", "from copy import deepcopy import numpy as np import pandas as pd from", "in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind,", "np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \" +", "if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in", "@staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list() for ds", "# for now, look for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols =", "unique, counts = np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size if ratio <", "np.max(counts) / col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique)", "pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list)))", "= DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not np.all(split.df_profiles ==", "general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el", "not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) 
else: new_dh.add_sheet(ds.name + \" - \" +", "SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels =", "pd from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier,", "may change slightly (used) # period may change slightly (not checked for now)(should", "dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type:", "{index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in closest_dists.index} for word_set, ds in", "= np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids =", "column # Should be strings (for now) (used) # Kind of periodic (thus", "< 0.5: row_name = \"Row \" + str(ind) if row_name in new_dh_dict: new_dh", "word_list = [str(el) for el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict =", "coherence deviations = list() if tri_type == \"aggregate\": # search for the most", "np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict", "new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split)", "the most coherent sizes for name in fully_represented: dh = dh_dict[name] n_sheet_dev =", "np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict): for name, val in cat_row.iteritems(): if", "in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key in out_headers:", "- \" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) 
# find similar entries, group them", "= dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if", "dtype=int) for enum, key in enumerate(headers_dict): for name, val in cat_row.iteritems(): if rev_match_dict[val]", "in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index))", "ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids", "np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size if ratio < 0.5 and len(unique)/cat_row.size", "import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from", "temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data,", "dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds,", "header for key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles", "np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size,", "look for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW", "repetitive entries) (used) # some entries may change slightly (used) # period may", "n_el = 
len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el): for j in", "if ratio < 0.5 and len(unique)/cat_row.size < 0.5: row_name = \"Row \" +", "= np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic", "match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond)", "pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el", "for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = []", "**kwargs): assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\"", "DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form", "ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances =", "for j in range(i + 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :] -", "= tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"]", "if ds.name != ds.orig_sheet_name: name = ds.name + \" \" + name new_dh.add_sheet(name,", "\" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name != ds.orig_sheet_name:", "for ds in dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers =", "for ds in dh] meta_ids = [ds.id for ds in meta_dh.data_struct_list] #content =", "col in ds.df_data.columns: if 
ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING:", "SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling", "meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is a reasonable", "ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data =", "[] for col_name in d_cols: col = ds.df_data[col_name] if len(date_form) == 0: date_form", "= [str(el) for el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict()", "j] = np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist = np.min(distances) if min_dist <", "range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) -", "range(n_components): comp = np.array(uniques)[labels == i] if len(comp) == 1: match_dict[comp[0]] = comp", "= tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))]", "# Now fill the dh # First, if same length, find optimal header", "@staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find the category column # Should be", "new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list): # map each meta data to the", "comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod", "each meta data to the triangle closest under it tr_ids = [ds.id for", "ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: 
if ds.df_data.size > meta_ds.df_data.size: meta_high =", "name in fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list", "df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \"", "= dict() for key, val in match_dict.items(): for item in val: rev_match_dict[item] =", "- np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict =", "distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is a reasonable measure of", "if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list =", "for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev #", "= ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] ==", "+ name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh,", "in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in vals] word_set.update(word_list)", "in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum]", "pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict:", "stringified = sorted([str(el) for el in difference]) name = \" \".join(stringified) name =", "+ len_dev # if n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n)", "ds.df_data.shape[0] for ds in dh] square_index = np.minimum(square_ind1, 
square_ind2).mean() deviations.append(1-square_index) elif tri_type ==", "+ np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches", "== 0: date_form = DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) ==", "new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds,", "ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size", "ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries)", "new_dh.add_sheet(ds.name + \" - \" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar", "for ds in dh] # unique, counts = np.unique(occurence_list, return_counts=True) # dev =", "closest under it tr_ids = [ds.id for ds in dh] meta_ids = [ds.id", "comp else: # find a common name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]),", "dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict,", "# meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) # print('found') if", "dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh,", "np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array)", 
"ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated headers repeated, deviating_entries", "closest_dists.index} for word_set, ds in zip(word_set_list, dh): for meta_id in closest_dists.index: if closest_dists[meta_id]", "coherent sizes for name in fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs", "else: name = str(i) if ds.name != ds.orig_sheet_name: name = ds.name + \"", "== \"\" or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for", "== i] if len(comp) == 1: match_dict[comp[0]] = comp else: # find a", "for now)(should be checked in new if statment) # Should get tag matches", "= SubTriangler.component_finder(unique) # now load the new_dh for name in match_dict: cond =", "use_median = False outer_dict['use_median'] = use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh):", "ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])]", "not in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now", "# [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high: tr_col_int_array", "= len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el): for j in range(i,", "= pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" -", "= tr_spatial_dict use_median = True for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']])", "new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh, 
word_set_list): # map each meta data", "meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array -", "= np.max(counts) / cat_row.size if ratio < 0.5 and len(unique)/cat_row.size < 0.5: row_name", "> pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh = new_dh_dict[col_name]", "meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))]", "- info_array[j, :]) min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False", "new_dh_dict[row_name] else: new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict =", "SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \" -", "== name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for", "for header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key", "[] len_array = np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict): for name, val", "rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) #", "len(headers_dict[key]) # Now fill the dh # First, if same length, find optimal", "in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True) ratio =", "check size coherence deviations = list() if tri_type == \"aggregate\": # search for", "make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\" 
if", "# some entries may change slightly (used) # period may change slightly (not", "len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev # if n_outputs == 1:", "val) count_dict[key] = np.sum(counts[active]) # get number of data_structs to make headers_dict =", "same_length: out_headers = deepcopy(headers_dict) for i in range(len_array[0]): i_headers = np.array([val[i] for val", "= {} for key, val in match_dict.items(): active = np.isin(unique, val) count_dict[key] =", "and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] = header for", "ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions that", "= np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median =", "import digits from copy import deepcopy import numpy as np import pandas as", "\" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def", "ds.name != ds.orig_sheet_name: name = ds.name + \" \" + name new_dh.add_sheet(name, ds.df_data,", "#df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for", "new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols", "len(date_form) == 0: date_form = DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col)", ">= pp.MIN_LABEL_SIM) # make matches match_dict = dict() for i in range(n_components): comp", "pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" + key, df_data, df_profiles, 
orig_sheet_name=ds.orig_sheet_name)", "= False outer_dict['use_median'] = use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds,", "in range(i + 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :])", "!= None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most", "fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 =", "in fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list =", "for col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if", "index=meta_ids) #check if median is a reasonable measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh)", "np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids = {index:", "SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return", "# only decompositions that exist for all sheets are acknowledged fully_represented, n_orig_sheets =", "df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col = col.astype(str) unique,", "len_list = np.array([ds.df_data.shape[0] for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev", "square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": # get the dh with the most", "name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category", "@staticmethod def divide_meta_data(dh, meta_dh, 
word_set_list): # map each meta data to the triangle", "/ 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name']", "temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated:", "[ds.orig_sheet_name for ds in dh] # unique, counts = np.unique(occurence_list, return_counts=True) # dev", "!= ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data =", "= TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size > 0:", "find similar entries, group them and make collective name @staticmethod def component_finder(uniques): n_el", "np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict", "np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for ds in dh]) len_dev =", "@staticmethod def scrub_rows(dh): for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index", "headers # strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in", "ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list): # map", "if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist =", "orig_dh} fully_represented = list() for key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name for", 
"df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key])", "np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name", "name in fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh]", "d_cols: col = ds.df_data[col_name] if len(date_form) == 0: date_form = DateColIdentifier.date_form(col) == 1", "= comp else: # find a common name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0,", "meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat =", "dh # First, if same length, find optimal header naming same_length = np.std(len_array)", "sizes for name in fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs -", "rev_match_dict[item] = key count_dict = {} for key, val in match_dict.items(): active =", "all sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1:", "for name in match_dict: cond = np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0)", "dict() # This call will reset all entries in new_dh_dict new_dh_dict[dh.name] = dh", "= counts * period > string_col.size - period # now get the remaining", "len_array[enum] = len(headers_dict[key]) # Now fill the dh # First, if same length,", "<= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns])", "header for header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] 
for", "get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds in orig_dh} fully_represented", "new_dh_dict, meta_dh): # find the category column # Should be strings (for now)", "acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return fully_represented[0] #", "== SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col", "entries) (used) # some entries may change slightly (used) # period may change", "match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols =", "len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median", "if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name in", "count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int) for", "in dh] # unique, counts = np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts -", "# Kind of periodic (thus repetitive entries) (used) # some entries may change", "for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True)", "== key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill the dh # First,", "them and make collective name @staticmethod def component_finder(uniques): n_el = len(uniques) dist_m =", "size coherence deviations = list() if tri_type == \"aggregate\": # search for the", "temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles = 
ds.df_profiles.drop(ds.df_profiles.index[ind]) #wordset.add(ds.name)", "for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances", "d_cols[d_cols].index date_form = [] for col_name in d_cols: col = ds.df_data[col_name] if len(date_form)", "dh: if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name", "dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el): for j in range(i, n_el): dist_m[i][j]", "python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper", "SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return", "dict (not checked for now) df_data = ds.df_data df_profiles = ds.df_profiles orig_name =", "pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row,", "in match_dict.items(): active = np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get number of", "num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"]", "period may change slightly (not checked for now)(should be checked in new if", "for col in ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] ==", "= {} for key, val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] =", "import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct", "in range(n_components): comp = np.array(uniques)[labels == i] if len(comp) == 1: match_dict[comp[0]] =", 
"= sorted([str(el) for el in difference]) name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits)", "import DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh,", "new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name", "+ tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh =", "tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max", "in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict() # This call will reset", "len(unique)/cat_row.size < 0.5: row_name = \"Row \" + str(ind) if row_name in new_dh_dict:", "key in out_headers: out_headers[key][i] = header for key, val in headers_dict.items(): df_data =", "Now fill the dh # First, if same length, find optimal header naming", "- period # now get the remaining #sub_period_label = unique[period_label_bool == False] match_dict", "SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find", "unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) # now load the new_dh for name", "= set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers #", "= dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for ds", "word_set_list[i].copy() for j in 
range(len(word_set_list)): if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name:", "for key, val in match_dict.items(): for item in val: rev_match_dict[item] = key count_dict", "dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for ds in", "are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return fully_represented[0]", "#content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids)", "if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if", "np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True", "square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh] square_index = np.minimum(square_ind1, square_ind2).mean()", "meta_dh): # find the category column # Should be strings (for now) (used)", "most coherent sizes for name in fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0,", "\" - \" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries, group", "comp return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds)", "= np.array([\"Missing header\" in header for header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)):", "columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in", "the category column # Should be strings (for now) (used) # Kind of", "triangle closest under it tr_ids = [ds.id for ds in 
dh] meta_ids =", "date_form = DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form", "meta_dh)) if meta_dh != None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) #", "n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict = dict() for", "tr_spatial_dict use_median = True for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for", "else: new_dh.add_sheet(ds.name + \" - \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def", "- dh.n) len_list = np.array([ds.df_data.shape[0] for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev", "num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size >", "dh.n) # else: # occurence_list = [ds.orig_sheet_name for ds in dh] # unique,", "val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index,", "reasonable measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index)", "data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions that exist for all sheets are", "for el in outer_dict.values() if el['name'] == name]) distances = np.zeros((len(info_array), len(info_array))) +", "# get number of data_structs to make headers_dict = {} for key, val", "range(len_array[0]): i_headers = np.array([val[i] for val in headers_dict.values()]) missing = np.array([\"Missing header\" in", "for i in range(n_el): for j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i],", "= dict() # This call will reset all entries in new_dh_dict new_dh_dict[dh.name] =", "comp = np.array(uniques)[labels == i] if len(comp) == 1: 
match_dict[comp[0]] = comp else:", "= ds.name + \" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] =", "outer_dict = dict() for ds in dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data,", "\" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries, group them and", "np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get number of data_structs to make headers_dict", "= general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for", "== \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \" - \" + name, sub_df_data, sub_df_profiles,", "fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list()", "uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m", "= {ds.orig_sheet_name for ds in orig_dh} fully_represented = list() for key, dh in", "meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check =", "ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size if", "= SubTriangler.component_finder(unique) rev_match_dict = dict() for key, val in match_dict.items(): for item in", "< 0.5 and len(unique)/cat_row.size < 0.5: row_name = \"Row \" + str(ind) if", "from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS", "headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = 
ds.df_profiles.loc[ds.df_data.index != ind, val]", "repeated headers # strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind", "kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict() # This call will reset all", "pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \"", "in closest_dists.index} for word_set, ds in zip(word_set_list, dh): for meta_id in closest_dists.index: if", "optimal header naming same_length = np.std(len_array) == 0 if same_length: out_headers = deepcopy(headers_dict)", "with the most coherent sizes for name in fully_represented: dh = dh_dict[name] n_sheet_dev", "ds in dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh] square_index", "= str(i) if ds.name != ds.orig_sheet_name: name = ds.name + \" \" +", "val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict),", "i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i]", "'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) # print('found')", "== False] match_dict = SubTriangler.component_finder(unique) # now load the new_dh for name in", "name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list() for ds in dh:", "print('found') if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for", "for item in val: rev_match_dict[item] = key count_dict = {} for key, val", "= np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict): for name, val in 
cat_row.iteritems():", "in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[ind]) #wordset.add(ds.name) #return wordset return set()", "# Find the most unique name for i in range(len(word_set_list)): ds = dh.data_struct_list[i]", "in range(len(word_set_list)): if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j])", "in range(len(info_array)): for j in range(i + 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i,", "get the dh with the most coherent sizes for name in fully_represented: dh", "def vertical_category_division(ds, new_dh_dict, meta_dh): # find the category column # Should be strings", "new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for key, val in", "len(fully_represented) == 1: return fully_represented[0] # check size coherence deviations = list() if", "= np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list = [ds.orig_sheet_name for ds in", "= np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if", "in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m)", "0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))]", "ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True for name in dh.data_dict: info_array =", "for split in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data", "ds in dh: if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index))", 
"new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs)", "np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name])", "col = ds.df_data[col_name] if len(date_form) == 0: date_form = DateColIdentifier.date_form(col) == 1 else:", "temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill", "meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh))", "[ds.id for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids)", "repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col", "= ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size", "horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category rows # for now, look for", "np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles", "return_counts=True) # dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown", "col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio >", "row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else: 
new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh", "pandas as pd from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier", "pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if", "0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds", "min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median return outer_dict @staticmethod def", "if col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] =", "Kind of periodic (thus repetitive entries) (used) # some entries may change slightly", "key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill the dh # First, if", "comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict", "temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[ind])", "new_dh_dict @staticmethod def scrub_rows(dh): for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols =", "- meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist +", "= SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels", "orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds in orig_dh} fully_represented =", "if tri_type == \"aggregate\": # search for the most square ones! 
for name", "def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list() for ds in", "= ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only", "/ df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col = col.astype(str)", "= np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] =", "TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper", "cond = np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles", "Should be strings (for now) (used) # Kind of periodic (thus repetitive entries)", "in difference]) name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name = str(i)", "= TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col in deviating_entries: if ds.df_data.loc[ind,", "Find the most unique name for i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference", "np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist", "\"Row \" + str(ind) if row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh", "dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds in dh:", "and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: 
new_dh.add_sheet(ds.name + \" - \"", "+ name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential", "dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) > 0: stringified = sorted([str(el) for el", "= SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds", "#kill repeated headers # strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for", "key in enumerate(headers_dict): for name, val in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict", "= np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] =", "component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el): for j", "@staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions that exist for all", "n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds in dh: d_cols =", "@staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def", "columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" + key,", "in range(len_array[0]): i_headers = np.array([val[i] for val in headers_dict.values()]) missing = np.array([\"Missing header\"", "deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def 
get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets", "orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not (", "for key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles =", "else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name", "in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")):", "i in range(len_array[0]): i_headers = np.array([val[i] for val in headers_dict.values()]) missing = np.array([\"Missing", "np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists = distances.min(axis=1)", "for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if meta_dh.n >", "False outer_dict['use_median'] = use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict,", "headers_dict.values()]) missing = np.array([\"Missing header\" in header for header in i_headers]) if np.any(missing)", "> string_col.size - period # now get the remaining #sub_period_label = unique[period_label_bool ==", "= dict() for i in range(n_components): comp = np.array(uniques)[labels == i] if len(comp)", "name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list):", "= list() for key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name for ds in", "!= \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], 
index=[ind], columns=[col])", "from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits =", "ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name in", "all entries in new_dh_dict new_dh_dict[dh.name] = dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict,", "in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name", "measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index) >", "new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool =", "= col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size if", "now)(should be checked in new if statment) # Should get tag matches in", "sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond) < 4: new_ds =", "for ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median", "the most square ones! 
for name in fully_represented: dh = dh_dict[name] square_ind1 =", "# find the category column # Should be strings (for now) (used) #", "list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in", "now, look for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >=", "cat_row = ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True) ratio = np.max(counts) /", "strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for", "# Should get tag matches in dict (not checked for now) df_data =", "Should get tag matches in dict (not checked for now) df_data = ds.df_data", "get the remaining #sub_period_label = unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) # now", "np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high:", "scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from", "np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) /", "index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind])", "2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist", "1, len(info_array)): 
distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist = np.min(distances)", "in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int)", "info_array[j, :]) min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median']", "= ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions that exist", "= d_cols[d_cols].index date_form = [] for col_name in d_cols: col = ds.df_data[col_name] if", "DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from", "df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles,", "= use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds,", "from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod", "2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist", "TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col in deviating_entries: if ds.df_data.loc[ind, col]", "in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill", "+ \" - \" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries,", "dh] # unique, counts = np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts - 
pp.N_DESIRED_PER_SHEET))", "= key count_dict = {} for key, val in match_dict.items(): active = np.isin(unique,", "in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or", "/ len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \" + tri_type) deviations =", "== 1) not_date_form = date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form])", "+ pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j in range(i + 1, len(info_array)):", "closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in", "== False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type,", "the dh with the most coherent sizes for name in fully_represented: dh =", "= np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for ds in dh]) len_dev", "== sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if", "a common name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name =", "n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el)", "SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict", "= np.array([ds.df_data.shape[0] for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev +", 
"rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill the dh #", "entries in new_dh_dict new_dh_dict[dh.name] = dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh)", "= SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return fully_represented[0] # check size coherence", "the remaining #sub_period_label = unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) # now load", "len(comp) == 1: match_dict[comp[0]] = comp else: # find a common name block", "name in match_dict: cond = np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data", "= num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns])", "for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict,", "in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for", "str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts", "meta_dh != None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the", "vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for ds in dh: tr_spatial_dict", "@staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds in", "columns=tr_ids, index=meta_ids) #check if median is a reasonable measure of distance spatial_info =", "SequenceMatcher from string import digits from copy import deepcopy import numpy as np", "num_cols = 
num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in", "now) (used) # Kind of periodic (thus repetitive entries) (used) # some entries", "> 0: stringified = sorted([str(el) for el in difference]) name = \" \".join(stringified)", "word_set_list): # map each meta data to the triangle closest under it tr_ids", "= unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) # now load the new_dh for", "meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) # print('found') if meta_high", "in d_cols: col = ds.df_data[col_name] if len(date_form) == 0: date_form = DateColIdentifier.date_form(col) ==", "col_dist closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index", "= np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median return", "in new if statment) # Should get tag matches in dict (not checked", ":] - info_array[j, :]) min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median =", "in meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array =", "- \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): #", "orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list): # map each meta", "adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING])", "digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type", "and make collective name @staticmethod 
def component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el))", "= date_form == False for ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if", "dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh] square_index = np.minimum(square_ind1,", "1: match_dict[comp[0]] = comp else: # find a common name block = SequenceMatcher(None,", "orig_sheet_name=ds.orig_sheet_name) # find similar entries, group them and make collective name @staticmethod def", "\" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find", "for val in headers_dict.values()]) missing = np.array([\"Missing header\" in header for header in", ":].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated", "len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod def", "counts = np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size if ratio < 0.5", "in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool", "dict() for i in range(n_components): comp = np.array(uniques)[labels == i] if len(comp) ==", "index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset =", "dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for", "= {ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented,", "\"single\": # get the dh with the most 
coherent sizes for name in", "if check == 'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in", "in out_headers: out_headers[key][i] = header for key, val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index", "distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index) > 0: ds_low", "sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and", "df_data = ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col in", "in kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in kwargs: tri_type", "ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles =", "= np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets =", "col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind],", "ratio < 0.5 and len(unique)/cat_row.size < 0.5: row_name = \"Row \" + str(ind)", "spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index) > 0: ds_low =", "class SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh'", "= str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh", "len(difference) > 0: stringified = sorted([str(el) for el in difference]) name = \"", "temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = 
set(ds.df_data.values[not_date_form, :].flatten())", "ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if col_name", "vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el) for el in vals] word_set.update(word_list) @staticmethod def", "word_set, ds in zip(word_set_list, dh): for meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA:", "@staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category rows # for now,", "if row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh = DataHolder(row_name) new_dh_dict[row_name] =", "== 'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #", "load the new_dh for name in match_dict: cond = np.array([string_col.values == sub_name for", "ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions", "< pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if col_name in", "and len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh", "in ds.df_data.columns: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data", "date_form = [] for col_name in d_cols: col = ds.df_data[col_name] if len(date_form) ==", "val in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name)", "[int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) >", "index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, 
temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form])", "enumerate(headers_dict): for name, val in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or rev_match_dict[val]", "word_set_list) # Find the most unique name for i in range(len(word_set_list)): ds =", "if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if", "= pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile,", "slightly (not checked for now)(should be checked in new if statment) # Should", "#wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col in deviating_entries: if ds.df_data.loc[ind, col] !=", "np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list = [ds.orig_sheet_name for ds in dh]", "in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL:", "be checked in new if statment) # Should get tag matches in dict", "ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for", "header naming same_length = np.std(len_array) == 0 if same_length: out_headers = deepcopy(headers_dict) for", "col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS:", "= new_dh #period_label_bool = counts * period > string_col.size - period # now", "\"aggregate\": # search for the most square ones! 
for name in fully_represented: dh", "= np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] =", "in enumerate(headers_dict): for name, val in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or", "find optimal header naming same_length = np.std(len_array) == 0 if same_length: out_headers =", "val: rev_match_dict[item] = key count_dict = {} for key, val in match_dict.items(): active", "meta_dh, word_set_list) # Find the most unique name for i in range(len(word_set_list)): ds", "square ones! for name in fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for", "len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list = list() for", "> 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for", "in header for header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0]", "== \"single\": # get the dh with the most coherent sizes for name", "in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for ds in dh:", "python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod def", "n_outputs = 1 if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict()", "similar entries, group them and make collective name @staticmethod def component_finder(uniques): n_el =", "= np.sum(counts[active]) # get number of data_structs to make headers_dict = {} for", "dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds in orig_dh} fully_represented = list() for key,", "ds.df_data.index[repeated]: for col in 
deviating_entries: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col]", "DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False for ind in ds.df_data.index[not_date_form]: for", "2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] =", "for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if", "in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind,", "dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh)", "ones! for name in fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds", "DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts * period > string_col.size - period", "val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int) for enum, key", "str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh =", "False for ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind, col] !=", "in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev # if n_outputs", "= np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles =", "in fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2", "slightly (used) # period may change slightly (not checked for now)(should be 
checked", "for word_set, ds in zip(word_set_list, dh): for meta_id in closest_dists.index: if closest_dists[meta_id] <", "ds.name + \" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh", "col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio", "= np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists =", "SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind,", "> 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) /", "== 0 if same_length: out_headers = deepcopy(headers_dict) for i in range(len_array[0]): i_headers =", "tri_type, n_outputs): # only decompositions that exist for all sheets are acknowledged fully_represented,", "divide_meta_data(dh, meta_dh, word_set_list): # map each meta data to the triangle closest under", "make matches match_dict = dict() for i in range(n_components): comp = np.array(uniques)[labels ==", "new_dh.add_sheet(ds.name + \" - \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds,", "string import digits from copy import deepcopy import numpy as np import pandas", "periodic (thus repetitive entries) (used) # some entries may change slightly (used) #", "- dh.n) # else: # occurence_list = [ds.orig_sheet_name for ds in dh] #", "choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict", "string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col = 
col.astype(str) unique, counts =", "as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler:", "sub_name for sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name", "= df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond) < 4:", "= pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is a reasonable measure of distance", "= np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] =", "= ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values", "SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col =", ":] == closest_dists[index])] for index in closest_dists.index} for word_set, ds in zip(word_set_list, dh):", "df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio =", "ds in dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def", "SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return fully_represented[0] # check size coherence deviations", "name == \"\" or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name)", "= date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def 
data_holder_selector(dh_dict,", "else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False for", "# period may change slightly (not checked for now)(should be checked in new", "headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill the dh # First, if same", "if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict() # This call", "group them and make collective name @staticmethod def component_finder(uniques): n_el = len(uniques) dist_m", "python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder,", "= np.max(counts) / col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and", "in headers_dict.values()]) missing = np.array([\"Missing header\" in header for header in i_headers]) if", "numpy as np import pandas as pd from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions", "tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median", "matches in dict (not checked for now) df_data = ds.df_data df_profiles = ds.df_profiles", "/ 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"]", "ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose", "the triangle closest under it tr_ids = [ds.id for ds in dh] meta_ids", "# This call will reset all entries in new_dh_dict new_dh_dict[dh.name] = dh for", 
"date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name,", "remaining #sub_period_label = unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) # now load the", "that exist for all sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if", "for name in fully_represented: dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n)", "now get the remaining #sub_period_label = unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) #", "else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id,", "\".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name != ds.orig_sheet_name: name", "dev = n_sheet_dev + len_dev # if n_outputs == 1: # dev =", "def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds in orig_dh}", "remove_digits = str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs", "for el in difference]) name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name", "string_col = col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio = np.max(counts) / col.size", "== dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) > 0: stringified = sorted([str(el) for", "1) not_date_form = date_form == False for ind in ds.df_data.index[not_date_form]: for col in", "@staticmethod def component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el):", "if same length, 
find optimal header naming same_length = np.std(len_array) == 0 if", "in range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for j in range(len(word_set_list)): if", "scrub_rows(dh): for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form =", "= ds.df_data[col_name] if len(date_form) == 0: date_form = DateColIdentifier.date_form(col) == 1 else: date_form", "col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset", "el['vert_median']]) for el in outer_dict.values() if el['name'] == name]) distances = np.zeros((len(info_array), len(info_array)))", "ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list): # map each", "!= i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if len(difference) > 0:", "pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits", "(not checked for now)(should be checked in new if statment) # Should get", "else: raise ValueError(\"Unknown triangle type: \" + tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)]", "out_headers = deepcopy(headers_dict) for i in range(len_array[0]): i_headers = np.array([val[i] for val in", "and len(unique)/cat_row.size < 0.5: row_name = \"Row \" + str(ind) if row_name in", "val in headers_dict.items(): df_data = ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index !=", "for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] 
tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1))", "meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals =", "get tag matches in dict (not checked for now) df_data = ds.df_data df_profiles", "# choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name],", "= np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True for name", "'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in kwargs:", "distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist = np.min(distances) if min_dist", "match_dict.items(): active = np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get number of data_structs", "val in match_dict.items(): active = np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get number", "# if check == 'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el", "may change slightly (not checked for now)(should be checked in new if statment)", "np.sum(counts[active]) # get number of data_structs to make headers_dict = {} for key,", "= kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict", "ds in zip(word_set_list, dh): for meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if", "deviations = list() if tri_type == \"aggregate\": # search for the most square", "ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True) ratio", "orig_name, tri_type, n_outputs): # only decompositions that exist for all sheets are acknowledged", "if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most unique name", "count_dict[key] = 
np.sum(counts[active]) # get number of data_structs to make headers_dict = {}", "= np.array([val[i] for val in headers_dict.values()]) missing = np.array([\"Missing header\" in header for", "= ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name", "headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col in", "counts = np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev)", "for all sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) ==", "new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for", "dh.data_struct_list[i] difference = word_set_list[i].copy() for j in range(len(word_set_list)): if j != i and", "the dh # First, if same length, find optimal header naming same_length =", "ds.id] = ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :]", "# find potential category rows # for now, look for strings str_ratio =", "headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict): for", "/ cat_row.size if ratio < 0.5 and len(unique)/cat_row.size < 0.5: row_name = \"Row", "n_sheet_dev + len_dev # if n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET -", "to the triangle closest under it tr_ids = [ds.id for ds in dh]", "np.array([val[i] for val in headers_dict.values()]) missing = np.array([\"Missing header\" in header for header", "df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries, group them and make collective name @staticmethod", "np.min(np.array(ds.df_data.index)) ds_high = 
np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if", "length, find optimal header naming same_length = np.std(len_array) == 0 if same_length: out_headers", "if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else:", "in match_dict.items(): for item in val: rev_match_dict[item] = key count_dict = {} for", "ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length: df_data", "for now, look for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio", "counts * period > string_col.size - period # now get the remaining #sub_period_label", "name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j", "the most unique name for i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference =", "data to the triangle closest under it tr_ids = [ds.id for ds in", "zip(word_set_list, dh): for meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in", "in kwargs: tri_type = kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in kwargs: n_outputs", "= True for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in", "# strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]:", "el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for ds in", "name = ds.name + \" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name]", "= np.zeros(shape=(n_el,n_el)) for i in range(n_el): 
for j in range(i, n_el): dist_m[i][j] =", "most unique name for i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy()", "fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return fully_represented[0] # check", "= np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat)", "meta_high = np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if check == 'Combined': #", "for now) df_data = ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for col_name,", "tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True for", "n_outputs): # only decompositions that exist for all sheets are acknowledged fully_represented, n_orig_sheets", "entries may change slightly (used) # period may change slightly (not checked for", "ind in ds.df_data.index[repeated]: for col in deviating_entries: if ds.df_data.loc[ind, col] != \"\" and", "median is a reasonable measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in", "DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for key, val", "distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in closest_dists.index} for word_set, ds in zip(word_set_list,", "rows # for now, look for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols", "meta_ids = [ds.id for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in", "outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) 
SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod", "col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist =", "new_dh for name in match_dict: cond = np.array([string_col.values == sub_name for sub_name in", "in dh: if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if", "== SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row =", "= DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts * period > string_col.size -", "1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([])", "tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max tr_spatial_dict['name'] = ds.name outer_dict[ds.id]", "or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key]) # Now fill the dh", "ds in dh] square_index = np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": #", "PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class", "in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type,", "#sub_period_label = unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique) # now load the new_dh", "kwargs: tri_type = kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in kwargs: 
n_outputs =", "= dh.data_struct_list[i] difference = word_set_list[i].copy() for j in range(len(word_set_list)): if j != i", "in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten()", "or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split in", "DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles =", "check periodic potential string_col = col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio =", "< pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name)", "= meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array", "check = meta_ds.df_data.iloc[0,0] # if check == 'Combined': # meta_col_int_array = np.array( #", "ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): #", "closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in closest_dists.index} for word_set,", "word_set_list = list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None:", "ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions that exist for", "for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict,", 
"meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most unique name for", "and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile =", "if el['name'] == name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in", "new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts * period > string_col.size", "cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] =", "same length, find optimal header naming same_length = np.std(len_array) == 0 if same_length:", "ds_high = np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size", "in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh,", "num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index)", "import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp", "pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data =", "= np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if check == 'Combined': # meta_col_int_array", "dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 = 
[ds.df_data.shape[1] / ds.df_data.shape[0]", "dh_dict.items(): sheets = {ds.orig_sheet_name for ds in dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key)", "use_median = True for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el", "outer_dict['use_median'] = use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh)", "= ds.df_data.loc[ds.df_data.index != ind, val] df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val] if same_length:", "python_back_end.triangle_formatting.triangle_chopper import TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('',", "= SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index))", "col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low) distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists", "= np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": # get the dh with", "= (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]:", "difference = word_set_list[i].copy() for j in range(len(word_set_list)): if j != i and ds.orig_sheet_name", "pp.MIN_LABEL_SIM) # make matches match_dict = dict() for i in range(n_components): comp =", "closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list", "category rows # for now, look for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1]", "as pd from scipy.sparse.csgraph._traversal import 
connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import", "if len(difference) > 0: stringified = sorted([str(el) for el in difference]) name =", "meta_dh): # find potential category rows # for now, look for strings str_ratio", "el in difference]) name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name =", "== 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list =", "python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from", "return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols", "def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs): # only decompositions that exist for all sheets", "divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict,", "info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if el['name'] == name]) distances", "new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts", "0.5: row_name = \"Row \" + str(ind) if row_name in new_dh_dict: new_dh =", "= comp return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols =", "meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median", "ds.df_profiles.loc[ind, col] == 
SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING,", "pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data =", "meta_dh = kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in kwargs: tri_type = kwargs['tri_type']", "= pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check", "columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles", "check == 'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns])", "= new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list): # map each meta data to", "= SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp", "= pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data", "df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: #", "= n_sheet_dev + len_dev # if n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET", "import numpy as np import pandas as pd from scipy.sparse.csgraph._traversal import connected_components from", "# search for the most square ones! 
for name in fully_represented: dh =", "== 1) not_date_form = date_form == False for ind in ds.df_data.index[not_date_form]: for col", "def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category rows # for now, look", "\"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict() # This call will", "np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j in range(i +", "# dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list = [ds.orig_sheet_name for", "\" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh,", "if len(date_form) == 0: date_form = DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form,", "from difflib import SequenceMatcher from string import digits from copy import deepcopy import", "len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \" + tri_type) deviations = np.array(deviations)", "np.array([\"Missing header\" in header for header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header", "= kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs", "= str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique,", "enum, key in enumerate(headers_dict): for name, val in cat_row.iteritems(): if rev_match_dict[val] not in", "square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for", "new_dh_dict[dh.name] = dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in", "= np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False ds.df_data = 
ds.df_data.drop(ds.df_data.index[not_date_form])", "if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None):", "else: # occurence_list = [ds.orig_sheet_name for ds in dh] # unique, counts =", "range(len(word_set_list)): if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name: difference = difference.difference(word_set_list[j]) if", "in dict (not checked for now) df_data = ds.df_data df_profiles = ds.df_profiles orig_name", "kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs =", "dh.data_dict: info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if el['name'] == name])", "i in range(n_el): for j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio()", "np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if el['name'] == name]) distances = np.zeros((len(info_array),", "0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod def identify_category_name(ds,", "outer_dict[ds.id] = tr_spatial_dict use_median = True for name in dh.data_dict: info_array = np.array([np.array([el['hori_median'],", "np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist", "new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts * period", "= np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]:", "def component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el): for", "ratio > pp.MIN_RATIO_LARGEST_CAT and 
len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh =", "decompositions that exist for all sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name)", "closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]: vals = meta_dh.id_dict[meta_id].df_data.values.flatten() word_list = [str(el)", "meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids,", "0 if same_length: out_headers = deepcopy(headers_dict) for i in range(len_array[0]): i_headers = np.array([val[i]", "This call will reset all entries in new_dh_dict new_dh_dict[dh.name] = dh for dh", "new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name,", "kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\" if \"tri_type\" in kwargs: tri_type =", "= i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] = header for key, val in", "+ key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries, group them and make", "str(ind) if row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh = DataHolder(row_name) new_dh_dict[row_name]", "index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is a reasonable measure", "dict() for key, val in match_dict.items(): for item in val: rev_match_dict[item] = key", "dh): for meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id in closest_ids[meta_id]:", "tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else:", "i] if 
len(comp) == 1: match_dict[comp[0]] = comp else: # find a common", "data_structs to make headers_dict = {} for key, val in count_dict.items(): if val", "= [] len_array = np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict): for name,", "0.5 and len(unique)/cat_row.size < 0.5: row_name = \"Row \" + str(ind) if row_name", "i in range(n_components): comp = np.array(uniques)[labels == i] if len(comp) == 1: match_dict[comp[0]]", "= np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist = np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE:", "kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict =", "key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find similar entries, group them and make collective", "'', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh']", "list() for key, dh in dh_dict.items(): sheets = {ds.orig_sheet_name for ds in dh}", "closest_dists[index])] for index in closest_dists.index} for word_set, ds in zip(word_set_list, dh): for meta_id", "new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find the", "SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in", "most square ones! 
for name in fully_represented: dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1]", "def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type = \"single\"", "= deepcopy(headers_dict) for i in range(len_array[0]): i_headers = np.array([val[i] for val in headers_dict.values()])", "= pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" + key, df_data, df_profiles,", "connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict = dict() for i in range(n_components):", "/ 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array)", "np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"] = np.iinfo(np.uint32).max", "match_dict.items(): for item in val: rev_match_dict[item] = key count_dict = {} for key,", "len_dev # if n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) #", "for i in range(len_array[0]): i_headers = np.array([val[i] for val in headers_dict.values()]) missing =", "import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings", "new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds(): if not np.all(split.df_profiles", "vertical_category_division(ds, new_dh_dict, meta_dh): # find the category column # Should be strings (for", "= ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated headers repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds)", "# map each meta data to the triangle closest under 
it tr_ids =", "== False for ind in ds.df_data.index[not_date_form]: for col in ds.df_data.columns: if ds.df_data.loc[ind, col]", "meta_dh, word_set_list): # map each meta data to the triangle closest under it", "word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for ds in dh: tr_spatial_dict =", "ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[ind]) #wordset.add(ds.name) #return wordset return", "from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter", "len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] = comp return match_dict @staticmethod def identify_category_name(ds, meta_dh):", "use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict,", "if name == \"\" or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name,", "new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name) new_dh_dict[dh.name] = new_dh @staticmethod def divide_meta_data(dh, meta_dh, word_set_list): #", "> 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most unique name for i", "@staticmethod def make_standard_triangles(dh, **kwargs): assert 'meta_dh' in kwargs meta_dh = kwargs['meta_dh'] tri_type =", "strings (for now) (used) # Kind of periodic (thus repetitive entries) (used) #", "if statment) # Should get tag matches in dict (not checked for now)", "orig_sheets = {ds.orig_sheet_name for ds in orig_dh} fully_represented = list() for key, dh", "find potential category rows # for now, look for strings str_ratio = (ds.df_profiles", "len_array = 
np.zeros(len(headers_dict), dtype=int) for enum, key in enumerate(headers_dict): for name, val in", "potential category rows # for now, look for strings str_ratio = (ds.df_profiles ==", "* period > string_col.size - period # now get the remaining #sub_period_label =", "2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] =", "np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] = header", "False] match_dict = SubTriangler.component_finder(unique) # now load the new_dh for name in match_dict:", "naming same_length = np.std(len_array) == 0 if same_length: out_headers = deepcopy(headers_dict) for i", "= np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get number of data_structs to make", "if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el", "fully_represented[0] # check size coherence deviations = list() if tri_type == \"aggregate\": #", "for el in vals] word_set.update(word_list) @staticmethod def generate_tr_spatial_info(dh): outer_dict = dict() for ds", "string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check", "return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name) word_set_list =", "DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = [] for col_name in d_cols: col =", "be strings (for now) (used) # Kind of periodic (thus repetitive entries) (used)", "ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for j in 
range(len(word_set_list)): if j !=", "= 1 if \"n_outputs\" in kwargs: n_outputs = kwargs['n_outputs'] new_dh_dict = dict() #", "np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev # if n_outputs == 1: # dev", "list() if tri_type == \"aggregate\": # search for the most square ones! for", "= new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts *", "distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index,", "val in headers_dict.values()]) missing = np.array([\"Missing header\" in header for header in i_headers])", "SubTriangler.component_finder(unique) # now load the new_dh for name in match_dict: cond = np.array([string_col.values", "/ col.size if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) <", "now load the new_dh for name in match_dict: cond = np.array([string_col.values == sub_name", "name = name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name != ds.orig_sheet_name: name =", "= np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array)", "col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind],", "+ 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist =", "in dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols", "block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name] =", "not 
np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name", "TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers] if num_cols.size > 0: tr_col_int_array", "new_dh = new_dh_dict[row_name] else: new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique)", "False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs):", "else: # find a common name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0,", "= ds.name outer_dict[ds.id] = tr_spatial_dict use_median = True for name in dh.data_dict: info_array", "dh.n) len_list = np.array([ds.df_data.shape[0] for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev =", "tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name]", "= ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip repeated headers repeated,", "\"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING: temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col]) temp_profile", "meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) # choose dh in dict dh_name =", "for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high = np.max(np.array(meta_ds.df_data.index)) # check", "cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts = np.unique(cat_row, return_counts=True) ratio = 
np.max(counts)", "= new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict = dict() for key, val in match_dict.items():", "if num_cols.size > 0: tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] =", "in dh} if len(orig_sheets.difference(sheets)) == 0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh,", "el in num_cols.columns]) tr_spatial_dict[\"hori_median\"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))] tr_spatial_dict[\"int_array\"] = tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"]", "# check size coherence deviations = list() if tri_type == \"aggregate\": # search", "for enum, key in enumerate(headers_dict): for name, val in cat_row.iteritems(): if rev_match_dict[val] not", "# dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle", "== closest_dists[index])] for index in closest_dists.index} for word_set, ds in zip(word_set_list, dh): for", "if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else:", "np import pandas as pd from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import general_adjacent", "np.max(np.array(meta_ds.df_data.index)) # check = meta_ds.df_data.iloc[0,0] # if check == 'Combined': # meta_col_int_array =", "tr_col_int_array.reshape((tr_col_int_array.size, 1)) tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] =", "<gh_stars>0 from difflib import SequenceMatcher from string import digits from copy import deepcopy", "return_counts=True) ratio = np.max(counts) / col.size 
if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio >", "DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('', '', digits) @staticmethod def make_standard_triangles(dh, **kwargs):", "sub_name in match_dict[name]]).any(axis=0) sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\"", "match_dict[name] = comp return match_dict @staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols", "range(i + 1, len(info_array)): distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :]) min_dist", "+ col_dist closest_dists = distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for", "val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key])", "from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import PROGRAM_PARAMETERS as pp from python_back_end.triangle_formatting.triangle_chopper import", "el['name'] == name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)):", "if ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size: meta_high", "pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential string_col = col.astype(str) unique, counts = np.unique(string_col, return_counts=True)", "orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict, meta_dh): # find potential category rows # for", "j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m +", "if \"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in", "el in meta_ds.df_data.columns]) 
#meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0: if", "difference.difference(word_set_list[j]) if len(difference) > 0: stringified = sorted([str(el) for el in difference]) name", ":] unique, counts = np.unique(cat_row, return_counts=True) ratio = np.max(counts) / cat_row.size if ratio", "for strings str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for", "np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size > meta_ds.df_data.size:", "outer_dict.values() if el['name'] == name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i", "import pandas as pd from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import general_adjacent from", "np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size if string_ratio > pp.MIN_STRING_RATIO_CAT_COL: # check periodic potential", "if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median return outer_dict @staticmethod", "for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh) #", "#wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers", "+ str(ind) if row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh = DataHolder(row_name)", "ds in dh: tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns)", "= 
np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \"", "np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] = header for key,", "= np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values() if el['name'] == name]) distances =", "= len(headers_dict[key]) # Now fill the dh # First, if same length, find", "deepcopy(headers_dict) for i in range(len_array[0]): i_headers = np.array([val[i] for val in headers_dict.values()]) missing", "meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): #", "= pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data", "import deepcopy import numpy as np import pandas as pd from scipy.sparse.csgraph._traversal import", "ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds in", "word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list)", "# now get the remaining #sub_period_label = unique[period_label_bool == False] match_dict = SubTriangler.component_finder(unique)", "+ \" - \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod def horizontal_category_division(ds, new_dh_dict,", "!= ind, val] if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values,", "orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds 
in orig_dh} fully_represented = list()", "distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j in", "date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False ds.df_data =", "SubTriangler.component_finder(unique) rev_match_dict = dict() for key, val in match_dict.items(): for item in val:", "= np.std(len_array) == 0 if same_length: out_headers = deepcopy(headers_dict) for i in range(len_array[0]):", "el in meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array", "checked for now)(should be checked in new if statment) # Should get tag", "only decompositions that exist for all sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict,", "match_dict[comp[0]] = comp else: # find a common name block = SequenceMatcher(None, comp[0],", "dh] meta_ids = [ds.id for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds", "for j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m", "string_col.size - period # now get the remaining #sub_period_label = unique[period_label_bool == False]", "/ 2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))]", "in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is a", "for key, val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array", "# find similar entries, group them and make collective name @staticmethod def component_finder(uniques):", "key, val in match_dict.items(): active = np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get", "\" + tri_type) deviations = np.array(deviations) return 
fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh", "for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[ind]) #wordset.add(ds.name) #return wordset", "in range(n_el): for j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m", "ratio = np.max(counts) / cat_row.size if ratio < 0.5 and len(unique)/cat_row.size < 0.5:", "return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds)", "0: if spatial_info['use_median']: meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))] col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median'])", "index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" +", "potential string_col = col.astype(str) unique, counts = np.unique(string_col, return_counts=True) ratio = np.max(counts) /", "type: \" + tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name):", "number of data_structs to make headers_dict = {} for key, val in count_dict.items():", "as np import pandas as pd from scipy.sparse.csgraph._traversal import connected_components from python_back_end.utilities.help_functions import", "dh = dh_dict[name] n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for", "val in match_dict.items(): for item in val: rev_match_dict[item] = key count_dict = {}", "np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise", "new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh = 
DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict =", "from python_back_end.utilities.help_functions import general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions", "meta_ds.df_data.iloc[0,0] # if check == 'Combined': # meta_col_int_array = np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for", "n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n) len_list = np.array([ds.df_data.shape[0] for ds in dh])", "reset all entries in new_dh_dict new_dh_dict[dh.name] = dh for dh in new_dh_dict.values(): SubTriangler.name_and_scrub_triangle(dh,", "= np.array( # [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) # print('found') if meta_high <=", "First, if same length, find optimal header naming same_length = np.std(len_array) == 0", "difference = difference.difference(word_set_list[j]) if len(difference) > 0: stringified = sorted([str(el) for el in", "df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond) < 4: new_ds", "= new_dh_dict[row_name] else: new_dh = DataHolder(row_name) new_dh_dict[row_name] = new_dh match_dict = SubTriangler.component_finder(unique) rev_match_dict", "ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is", "np.max(counts) / cat_row.size if ratio < 0.5 and len(unique)/cat_row.size < 0.5: row_name =", "range(n_el): for j in range(i, n_el): dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m =", "match_dict = dict() for i in range(n_components): comp = np.array(uniques)[labels == i] if", "in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']:", "for name, 
val in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or rev_match_dict[val] ==", "labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict = dict() for i", "index in closest_dists.index} for word_set, ds in zip(word_set_list, dh): for meta_id in closest_dists.index:", "for el in meta_ds.df_data.columns]) # print('found') if meta_high <= ds_high: tr_col_int_array = spatial_info[ds.id]['int_array']", "if same_length: df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key]) df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name", "spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) /", "return fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for", "\" + str(ind) if row_name in new_dh_dict: new_dh = new_dh_dict[row_name] else: new_dh =", "dh = dh_dict[name] square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh] square_ind2 = [ds.df_data.shape[1]", ">= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :] unique, counts =", "if rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key: headers_dict[key].append(name) len_array[enum] = len(headers_dict[key])", "@staticmethod def identify_category_name(ds, meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index", "== SheetTypeDefinitions.EMPTY_STRING) and not ( np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \"", "(used) # Kind of periodic (thus repetitive entries) (used) # some entries may", "1: # dev = 
np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list = [ds.orig_sheet_name", "else: new_dh = DataHolder(col_name) new_dh_dict[col_name] = new_dh #period_label_bool = counts * period >", "pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median return outer_dict @staticmethod def divide_into_subtriangles(ds, new_dh_dict,", "for the most square ones! for name in fully_represented: dh = dh_dict[name] square_ind1", "!= ds.orig_sheet_name: name = ds.name + \" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles,", "and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh", "# get the dh with the most coherent sizes for name in fully_represented:", "tr_ids = [ds.id for ds in dh] meta_ids = [ds.id for ds in", "elif tri_type == \"single\": # get the dh with the most coherent sizes", "col in deviating_entries: if ds.df_data.loc[ind, col] != \"\" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING:", "spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist = np.abs(meta_high-ds_low)", "def divide_meta_data(dh, meta_dh, word_set_list): # map each meta data to the triangle closest", "= np.unique(occurence_list, return_counts=True) # dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else:", "name, val in cat_row.iteritems(): if rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key:", "pp.MAX_N_CATS: if col_name in new_dh_dict: new_dh = new_dh_dict[col_name] else: new_dh = DataHolder(col_name) new_dh_dict[col_name]", "SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in", "tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for 
el in meta_ds.df_data.columns]) #meta_vert_median =", "of periodic (thus repetitive entries) (used) # some entries may change slightly (used)", "# now load the new_dh for name in match_dict: cond = np.array([string_col.values ==", "def scrub_rows(dh): for ds in dh: d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form", "len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j in range(i + 1,", "search for the most square ones! for name in fully_represented: dh = dh_dict[name]", "now) df_data = ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name for col_name, col", "ds_high: tr_col_int_array = spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median", "(ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1] cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row", "# make matches match_dict = dict() for i in range(n_components): comp = np.array(uniques)[labels", "meta data to the triangle closest under it tr_ids = [ds.id for ds", "tr_spatial_dict[\"vert_median\"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))] else: tr_spatial_dict[\"hori_median\"] = np.iinfo(np.uint32).max tr_spatial_dict[\"int_array\"] = np.array([]) tr_spatial_dict[\"vert_median\"]", "return_counts=True) ratio = np.max(counts) / cat_row.size if ratio < 0.5 and len(unique)/cat_row.size <", "name for i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for j", "n_outputs = kwargs['n_outputs'] new_dh_dict = dict() # This call will reset all entries", "collective name @staticmethod def component_finder(uniques): n_el = len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i", "= distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == 
closest_dists[index])] for index in closest_dists.index}", "in dict dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def", "= dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds in orig_dh} fully_represented = list() for", "tag matches in dict (not checked for now) df_data = ds.df_data df_profiles =", "SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs) return new_dh_dict[dh_name], new_dh_dict @staticmethod def scrub_rows(dh): for ds in", "repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds) #wordset.update(deviating_entries) for ind in ds.df_data.index[repeated]: for col in deviating_entries:", "meta_dh): #df_data = ds.df_data d_cols = DateColIdentifier.identify_marked_date_cols(ds) d_cols = d_cols[d_cols].index date_form = []", "1) not_date_form = date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod", "distances.min(axis=1) closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in closest_dists.index} for", "fully_represented[np.argmin(deviations)] @staticmethod def get_fully_represented(dh_dict, orig_name): orig_dh = dh_dict[orig_name] orig_sheets = {ds.orig_sheet_name for ds", "meta_dh=meta_dh) for ds in list(new_dh_dict.values())[0].data_struct_list: SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh) for new_dh in new_dh_dict.values(): SubTriangler.scrub_rows(new_dh)", "= kwargs['n_outputs'] new_dh_dict = dict() # This call will reset all entries in", "sub_df_data = df_data[cond].drop(columns=[string_col.name]) sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond) <", "header in i_headers]) if np.any(missing) and np.any(np.logical_not(missing)): header = i_headers[np.logical_not(missing)][0] 
for key in", "SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most unique name for i in range(len(word_set_list)):", "TriangleChopper from python_back_end.utilities.state_handling import DataHolder, DataStruct class SubTriangler: remove_digits = str.maketrans('', '', digits)", "col_name in d_cols: col = ds.df_data[col_name] if len(date_form) == 0: date_form = DateColIdentifier.date_form(col)", "key, val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array =", "name block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name = comp[0][block.a:block.size+block.a] match_dict[name]", "index=df_profiles.index, columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) #", "= np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for i in range(len(info_array)): for j in range(i", "# occurence_list = [ds.orig_sheet_name for ds in dh] # unique, counts = np.unique(occurence_list,", "(not checked for now) df_data = ds.df_data df_profiles = ds.df_profiles orig_name = ds.orig_sheet_name", "uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m >=", "set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) #kill repeated headers # strip", "import SequenceMatcher from string import digits from copy import deepcopy import numpy as", "np.sum(cond) < 4: new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name) for split in new_ds.col_split_ds():", "name = \" \".join(stringified) name = name.translate(SubTriangler.remove_digits) else: name = str(i) if ds.name", "# else: # occurence_list = [ds.orig_sheet_name for ds in dh] # unique, counts", "find a common name block = 
SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1])) name", "is a reasonable measure of distance spatial_info = SubTriangler.generate_tr_spatial_info(dh) for ds in dh:", "- spatial_info[ds.id]['hori_median']) else: col_mat = np.abs(tr_col_int_array - meta_col_int_array) col_dist = np.min(col_mat) ind_dist =", "[ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh] square_index = np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif", "0: fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None): new_dh = DataHolder(dh.name)", "columns=out_headers[key]) new_dh.add_sheet(ds.name + \" - \" + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name) # find", "= spatial_info[ds.id]['int_array'] meta_col_int_array = np.array( [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns]) #meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index)", "\"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \" - \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name)", "row_name = \"Row \" + str(ind) if row_name in new_dh_dict: new_dh = new_dh_dict[row_name]", "#period_label_bool = counts * period > string_col.size - period # now get the", "if len(fully_represented) == 1: return fully_represented[0] # check size coherence deviations = list()", "new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh):", "# if n_outputs == 1: # dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else:", "tri_type == \"aggregate\": # search for the most square ones! 
for name in", "0: date_form = DateColIdentifier.date_form(col) == 1 else: date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1)", "None: if meta_dh.n > 0: SubTriangler.divide_meta_data(dh, meta_dh, word_set_list) # Find the most unique", "range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for j in range(len(word_set_list)): if j", "for ind in ds.df_data.index[repeated]: for col in deviating_entries: if ds.df_data.loc[ind, col] != \"\"", "if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = [] len_array = np.zeros(len(headers_dict), dtype=int) for enum,", "== 1: match_dict[comp[0]] = comp else: # find a common name block =", "np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": # get the dh with the", "cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW for ind in cat_cols.index[cat_cols]: cat_row = ds.df_data.loc[ind, :]", "= ds.orig_sheet_name for col_name, col in df_data.iteritems(): string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) /", "active = np.isin(unique, val) count_dict[key] = np.sum(counts[active]) # get number of data_structs to", "np.min(distances) if min_dist < pp.MIN_MEDIAN_DISTANCE: use_median = False outer_dict['use_median'] = use_median return outer_dict", "to make headers_dict = {} for key, val in count_dict.items(): if val >", "dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n) # else: # occurence_list = [ds.orig_sheet_name for ds", "meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles =", "col], index=[ind], columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) for", "if median is a reasonable measure of distance spatial_info = 
SubTriangler.generate_tr_spatial_info(dh) for ds", "np.all(split.df_data == \"\")): meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \" - \" + name, sub_df_data,", "np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False for ind in ds.df_data.index[not_date_form]:", "= df_profiles[cond].drop(columns=[string_col.name]) if name == \"\" or np.sum(cond) < 4: new_ds = DataStruct(sub_df_data,", "not_date_form = date_form == False ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form]) @staticmethod def", "np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) # make matches match_dict = dict()", "cat_row.size if ratio < 0.5 and len(unique)/cat_row.size < 0.5: row_name = \"Row \"", "len(uniques) dist_m = np.zeros(shape=(n_el,n_el)) for i in range(n_el): for j in range(i, n_el):", "for i in range(len(word_set_list)): ds = dh.data_struct_list[i] difference = word_set_list[i].copy() for j in", "in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids) distances = pd.DataFrame(np.iinfo(np.uint32).max,", "= [ds.id for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for ds in meta_dh],", "key count_dict = {} for key, val in match_dict.items(): active = np.isin(unique, val)", "meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find the category column # Should", "new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds, new_dh_dict, meta_dh): # find the category column #", "= np.array(uniques)[labels == i] if len(comp) == 1: match_dict[comp[0]] = comp else: #", "statment) # Should get tag matches in dict (not checked for now) df_data", "= np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict: for meta_ds in meta_dh.data_dict[ds.name]: if ds.df_data.size >", 
"orig_sheet_name=ds.orig_sheet_name) for ind in repeated: ds.df_data = ds.df_data.drop(ds.df_data.index[ind]) ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[ind]) #wordset.add(ds.name) #return", "deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \" + tri_type) deviations = np.array(deviations) return", "columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form, :].flatten()) #ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form]) #ds.df_profiles", "tr_spatial_dict = dict() num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles) adj_headers = general_adjacent(num_cols.columns) num_cols = num_cols[adj_headers]", "meta_dh.add_ds(split) else: new_dh.add_sheet(ds.name + \" - \" + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name) @staticmethod", "\"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs = 1 if \"n_outputs\" in kwargs:", "in dh] meta_ids = [ds.id for ds in meta_dh.data_struct_list] #content = pd.Series([ds.df_data.values[0] for", "# Should be strings (for now) (used) # Kind of periodic (thus repetitive", "{} for key, val in count_dict.items(): if val > pp.MIN_YEARS_SPANNED: headers_dict[key] = []", "header = i_headers[np.logical_not(missing)][0] for key in out_headers: out_headers[key][i] = header for key, val", "dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio() dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el) n_components,", "in outer_dict.values() if el['name'] == name]) distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE for", "np.array([ds.df_data.shape[0] for ds in dh]) len_dev = np.sum(1-(len_list/np.max(len_list))) dev = n_sheet_dev + len_dev", "square_index = np.minimum(square_ind1, square_ind2).mean() deviations.append(1-square_index) elif tri_type == \"single\": # get the dh", "some entries may change slightly (used) # period 
may change slightly (not checked", "= list() for ds in dh: word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh)) if meta_dh != None: if", "ds in orig_dh} fully_represented = list() for key, dh in dh_dict.items(): sheets =", "it tr_ids = [ds.id for ds in dh] meta_ids = [ds.id for ds", "(for now) (used) # Kind of periodic (thus repetitive entries) (used) # some", "= \"single\" if \"tri_type\" in kwargs: tri_type = kwargs['tri_type'] n_outputs = 1 if", "def divide_into_subtriangles(ds, new_dh_dict, meta_dh): SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh) SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh) @staticmethod def vertical_category_division(ds,", "ds.orig_sheet_name: name = ds.name + \" \" + name new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name)", "pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids) #check if median is a reasonable measure of distance spatial_info", "in zip(word_set_list, dh): for meta_id in closest_dists.index: if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA: if ds.id", "pp.N_DESIRED_PER_SHEET)) / len(unique) deviations.append(dev) else: raise ValueError(\"Unknown triangle type: \" + tri_type) deviations", "sheets are acknowledged fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name) if len(fully_represented) == 1: return", "= np.logical_or(date_form, DateColIdentifier.date_form(col) == 1) not_date_form = date_form == False for ind in", "ValueError(\"Unknown triangle type: \" + tri_type) deviations = np.array(deviations) return fully_represented[np.argmin(deviations)] @staticmethod def", "SubTriangler.generate_tr_spatial_info(dh) for ds in dh: if len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high", "for col_name in d_cols: col = ds.df_data[col_name] if len(date_form) == 0: date_form =", "for ds in dh} if len(orig_sheets.difference(sheets)) == 0: 
fully_represented.append(key) return fully_represented, len(orig_sheets) @staticmethod", "for i in range(len(info_array)): for j in range(i + 1, len(info_array)): distances[i, j]", "deepcopy import numpy as np import pandas as pd from scipy.sparse.csgraph._traversal import connected_components", "general_adjacent from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter from python_back_end.definitions import SheetTypeDefinitions from python_back_end.program_settings import", "new_dh_dict[col_name] = new_dh #period_label_bool = counts * period > string_col.size - period #", "= [] for col_name in d_cols: col = ds.df_data[col_name] if len(date_form) == 0:", "#meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))] if len(tr_col_int_array) > 0: if spatial_info['use_median']: meta_median =", "== 1: return fully_represented[0] # check size coherence deviations = list() if tri_type", "= dist_m + np.transpose(dist_m) - np.eye(n_el) n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM) #", "columns=[col]) temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col]) meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name) #wordset = set(ds.df_data.values[not_date_form,", "len(ds.df_data.index) > 0: ds_low = np.min(np.array(ds.df_data.index)) ds_high = np.max(np.array(ds.df_data.index)) if ds.name in meta_dh.data_dict:" ]
[ "#def between_markers(text: str, begin: str, end: str) -> str: #start = text.find(begin) +", "str, begin: str, end: str) -> str: #if begin in text and end", "between_markers('What is >apple<', '>', '<') #== \"apple\", \"One sym\" b = between_markers(\"<head><title>My new", "'No hi', 'No markers at all' f = between_markers('No <hi>', '>', '<') #==", "= text.find(begin) + len(begin) if begin in text else None #stop = text.find(end)", "#def between_markers(text: str, begin: str, end: str) -> str: #start, stop = map(text.find,", ">apple<', '>', '<') #== \"apple\", \"One sym\" b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\",", "Solutions # #--------------------------------------------# #def between_markers(text: str, begin: str, end: str) -> str: #start", "\"apple\", \"One sym\" b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My new", "between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No markers at all' f =", "(begin, end)) #return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop < 0]] →", "0]:(stop, None)[stop < 0]] → if-tuple slicing #def between_markers(text: str, begin: str, end:", "if begin in text else None #stop = text.find(end) if end in text", "#return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a = between_markers('What is >apple<', '>',", "hi', 'No markers at all' f = between_markers('No <hi>', '>', '<') #== '',", "#if begin in text and end in text and text.index(begin) > text.index(end): #return", "f = between_markers('No <hi>', '>', '<') #== '', 'Wrong direction' print(a, b, c,", "None #stop = text.find(end) if end in text else None #return text[start:stop] #def", "begin: str, end: str) -> str: #start = text.find(begin) + len(begin) if begin", "text and end in text and text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0]", "slicing 
#def between_markers(text: str, begin: str, end: str) -> str: #if begin in", "str, end: str): if begin in text and end in text: if text.find(begin)>text.find(end):", "between_markers(text: str, begin: str, end: str) -> str: #start, stop = map(text.find, (begin,", "not in text: return text[:] elif begin not in text: return text[:text.find(end)] elif", "#== \"My new site\", \"HTML\" c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No',", "else None #stop = text.find(end) if end in text else None #return text[start:stop]", "len(begin), 0)[start < 0]:(stop, None)[stop < 0]] → if-tuple slicing #def between_markers(text: str,", "'No', 'No opened' d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close'", "opened' d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close' e =", "str, begin: str, end: str) -> str: #start, stop = map(text.find, (begin, end))", "and end in text and text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------#", "stop = map(text.find, (begin, end)) #return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop", "Solution # #--------------------------------------------# def between_markers(text: str, begin: str, end: str): if begin in", "text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------# #def between_markers(text: str, begin: str, end:", "str: #if begin in text and end in text and text.index(begin) > text.index(end):", "\"My new site\", \"HTML\" c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No", "len(begin) if begin in text else None #stop = text.find(end) if end in", "between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened' d = between_markers('No [b]hi', '[b]',", "map(text.find, (begin, end)) #return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop < 0]]", "text[start:stop] #def between_markers(text: str, begin: 
str, end: str) -> str: #start, stop =", "begin not in text and end not in text: return text[:] elif begin", "if end in text else None #return text[start:stop] #def between_markers(text: str, begin: str,", "return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and end not", "end: str) -> str: #if begin in text and end in text and", "#== 'No', 'No opened' d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No", "0)[start < 0]:(stop, None)[stop < 0]] → if-tuple slicing #def between_markers(text: str, begin:", "text: return text[:text.find(end)] elif end not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other", "in text and end not in text: return text[:] elif begin not in", "#== 'hi', 'No close' e = between_markers('No hi', '[b]', '[/b]') #== 'No hi',", "end: str) -> str: #start, stop = map(text.find, (begin, end)) #return text[(start +", "end in text: if text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not", "#== \"apple\", \"One sym\" b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My", "# #--------------------------------------------# #def between_markers(text: str, begin: str, end: str) -> str: #start =", "\"HTML\" c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened' d =", "#return text[start:stop] #def between_markers(text: str, begin: str, end: str) -> str: #start, stop", "return text[:text.find(end)] elif end not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions", "sym\" b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My new site\", \"HTML\"", "= between_markers('What is >apple<', '>', '<') #== \"apple\", \"One sym\" b = between_markers(\"<head><title>My", "new site</title></head>\", \"<title>\", \"</title>\") 
#== \"My new site\", \"HTML\" c = between_markers('No[/b] hi',", "in text and text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test", "'>', '<') #== \"apple\", \"One sym\" b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\")", "begin: str, end: str): if begin in text and end in text: if", "text[(start + len(begin), 0)[start < 0]:(stop, None)[stop < 0]] → if-tuple slicing #def", "#return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a = between_markers('What is", "site\", \"HTML\" c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened' d", "#--------------------------------------------# # Test # #--------------------------------------------# a = between_markers('What is >apple<', '>', '<') #==", "not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------# #def between_markers(text:", "str, end: str) -> str: #start, stop = map(text.find, (begin, end)) #return text[(start", "-> str: #start, stop = map(text.find, (begin, end)) #return text[(start + len(begin), 0)[start", "def between_markers(text: str, begin: str, end: str): if begin in text and end", "text and end not in text: return text[:] elif begin not in text:", "return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------# #def between_markers(text: str, begin: str,", "'[b]', '[/b]') #== 'No', 'No opened' d = between_markers('No [b]hi', '[b]', '[/b]') #==", "str): if begin in text and end in text: if text.find(begin)>text.find(end): return \"\"", "= between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close' e = between_markers('No hi',", "text: if 
text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text", "My Solution # #--------------------------------------------# def between_markers(text: str, begin: str, end: str): if begin", "= between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No markers at all' f", "'[/b]') #== 'hi', 'No close' e = between_markers('No hi', '[b]', '[/b]') #== 'No", "begin in text and end in text: if text.find(begin)>text.find(end): return \"\" else: return", "#--------------------------------------------# # Other Solutions # #--------------------------------------------# #def between_markers(text: str, begin: str, end: str)", "between_markers(text: str, begin: str, end: str): if begin in text and end in", "'[/b]') #== 'No', 'No opened' d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi',", "Test # #--------------------------------------------# a = between_markers('What is >apple<', '>', '<') #== \"apple\", \"One", "not in text and end not in text: return text[:] elif begin not", "str: #start = text.find(begin) + len(begin) if begin in text else None #stop", "> text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a =", "text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a = between_markers('What", "between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My new site\", \"HTML\" c = between_markers('No[/b]", "hi', '[b]', '[/b]') #== 'No hi', 'No markers at all' f = between_markers('No", "text.find(end) if end in text else None #return text[start:stop] #def between_markers(text: str, begin:", "# My Solution # #--------------------------------------------# def between_markers(text: str, begin: str, end: str): if", "# Test # 
#--------------------------------------------# a = between_markers('What is >apple<', '>', '<') #== \"apple\",", "text else None #stop = text.find(end) if end in text else None #return", "str, begin: str, end: str) -> str: #start = text.find(begin) + len(begin) if", "if-tuple slicing #def between_markers(text: str, begin: str, end: str) -> str: #if begin", "-> str: #if begin in text and end in text and text.index(begin) >", "site</title></head>\", \"<title>\", \"</title>\") #== \"My new site\", \"HTML\" c = between_markers('No[/b] hi', '[b]',", "'' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a = between_markers('What is >apple<',", "+ len(begin), 0)[start < 0]:(stop, None)[stop < 0]] → if-tuple slicing #def between_markers(text:", "return text[:] elif begin not in text: return text[:text.find(end)] elif end not in", "#--------------------------------------------# a = between_markers('What is >apple<', '>', '<') #== \"apple\", \"One sym\" b", "text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------# #def between_markers(text: str, begin:", "text: return text[:] elif begin not in text: return text[:text.find(end)] elif end not", "#start, stop = map(text.find, (begin, end)) #return text[(start + len(begin), 0)[start < 0]:(stop,", "0]] → if-tuple slicing #def between_markers(text: str, begin: str, end: str) -> str:", "text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and end", "elif begin not in text and end not in text: return text[:] elif", "< 0]:(stop, None)[stop < 0]] → if-tuple slicing #def between_markers(text: str, begin: str,", "# Other Solutions # #--------------------------------------------# #def between_markers(text: str, begin: str, end: str) ->", "elif end not in text: 
return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------#", "str) -> str: #start = text.find(begin) + len(begin) if begin in text else", "= between_markers('No <hi>', '>', '<') #== '', 'Wrong direction' print(a, b, c, d,", "str) -> str: #if begin in text and end in text and text.index(begin)", "text[:text.find(end)] elif end not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions #", "'hi', 'No close' e = between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No", "new site\", \"HTML\" c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened'", "None #return text[start:stop] #def between_markers(text: str, begin: str, end: str) -> str: #start,", "#stop = text.find(end) if end in text else None #return text[start:stop] #def between_markers(text:", "text else None #return text[start:stop] #def between_markers(text: str, begin: str, end: str) ->", "markers at all' f = between_markers('No <hi>', '>', '<') #== '', 'Wrong direction'", "#--------------------------------------------# #def between_markers(text: str, begin: str, end: str) -> str: #start = text.find(begin)", "begin: str, end: str) -> str: #if begin in text and end in", "Other Solutions # #--------------------------------------------# #def between_markers(text: str, begin: str, end: str) -> str:", "#start = text.find(begin) + len(begin) if begin in text else None #stop =", "None)[stop < 0]] → if-tuple slicing #def between_markers(text: str, begin: str, end: str)", "str) -> str: #start, stop = map(text.find, (begin, end)) #return text[(start + len(begin),", "close' e = between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No markers at", "hi', '[b]', '[/b]') #== 'No', 'No opened' d = between_markers('No [b]hi', '[b]', '[/b]')", "between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close' e = between_markers('No hi', '[b]',", "\"One sym\" 
b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My new site\",", "in text and end in text: if text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)]", "text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and end not in text: return text[:]", "in text: return text[:] elif begin not in text: return text[:text.find(end)] elif end", "in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------# #def between_markers(text: str,", "'[b]', '[/b]') #== 'hi', 'No close' e = between_markers('No hi', '[b]', '[/b]') #==", "#== 'No hi', 'No markers at all' f = between_markers('No <hi>', '>', '<')", "\"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and end not in", "at all' f = between_markers('No <hi>', '>', '<') #== '', 'Wrong direction' print(a,", "is >apple<', '>', '<') #== \"apple\", \"One sym\" b = between_markers(\"<head><title>My new site</title></head>\",", "\"<title>\", \"</title>\") #== \"My new site\", \"HTML\" c = between_markers('No[/b] hi', '[b]', '[/b]')", "str: #start, stop = map(text.find, (begin, end)) #return text[(start + len(begin), 0)[start <", "end not in text: return text[:] elif begin not in text: return text[:text.find(end)]", "not in text: return text[:text.find(end)] elif end not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------#", "#def between_markers(text: str, begin: str, end: str) -> str: #if begin in text", "end in text and text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# #", "[b]hi', '[b]', '[/b]') #== 'hi', 'No close' e = between_markers('No hi', '[b]', '[/b]')", "end)) #return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop < 0]] → 
if-tuple", "text and end in text: if text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif", "begin not in text: return text[:text.find(end)] elif end not in text: return text[text.find(begin)+len(begin):]", "text and text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test #", "end: str) -> str: #start = text.find(begin) + len(begin) if begin in text", "begin: str, end: str) -> str: #start, stop = map(text.find, (begin, end)) #return", "'No markers at all' f = between_markers('No <hi>', '>', '<') #== '', 'Wrong", "text[:] elif begin not in text: return text[:text.find(end)] elif end not in text:", "-> str: #start = text.find(begin) + len(begin) if begin in text else None", "= map(text.find, (begin, end)) #return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop <", "and end not in text: return text[:] elif begin not in text: return", "in text: return text[:text.find(end)] elif end not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# #", "str, end: str) -> str: #start = text.find(begin) + len(begin) if begin in", "#return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop < 0]] → if-tuple slicing", "in text else None #stop = text.find(end) if end in text else None", "'[b]', '[/b]') #== 'No hi', 'No markers at all' f = between_markers('No <hi>',", "text.find(begin) + len(begin) if begin in text else None #stop = text.find(end) if", "c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened' d = between_markers('No", "'No opened' d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close' e", "<hi>', '>', '<') #== '', 'Wrong direction' print(a, b, c, d, e, f,", "b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My new site\", \"HTML\" c", "\"</title>\") #== \"My new site\", \"HTML\" c = 
between_markers('No[/b] hi', '[b]', '[/b]') #==", "# #--------------------------------------------# a = between_markers('What is >apple<', '>', '<') #== \"apple\", \"One sym\"", "and text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------#", "'No close' e = between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No markers", "if text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and", "end: str): if begin in text and end in text: if text.find(begin)>text.find(end): return", "in text: if text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in", "< 0]] → if-tuple slicing #def between_markers(text: str, begin: str, end: str) ->", "text.index(begin) > text.index(end): #return '' #return text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a", "elif begin not in text: return text[:text.find(end)] elif end not in text: return", "begin in text else None #stop = text.find(end) if end in text else", "between_markers(text: str, begin: str, end: str) -> str: #if begin in text and", "#--------------------------------------------# def between_markers(text: str, begin: str, end: str): if begin in text and", "→ if-tuple slicing #def between_markers(text: str, begin: str, end: str) -> str: #if", "'[/b]') #== 'No hi', 'No markers at all' f = between_markers('No <hi>', '>',", "end not in text: return text[text.find(begin)+len(begin):] #--------------------------------------------# # Other Solutions # #--------------------------------------------# #def", "in text and end in text and text.index(begin) > text.index(end): #return '' #return", "e = between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No markers at all'", 
"between_markers(text: str, begin: str, end: str) -> str: #start = text.find(begin) + len(begin)", "= between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened' d = between_markers('No [b]hi',", "else None #return text[start:stop] #def between_markers(text: str, begin: str, end: str) -> str:", "#--------------------------------------------# # My Solution # #--------------------------------------------# def between_markers(text: str, begin: str, end: str):", "begin in text and end in text and text.index(begin) > text.index(end): #return ''", "end in text else None #return text[start:stop] #def between_markers(text: str, begin: str, end:", "# #--------------------------------------------# def between_markers(text: str, begin: str, end: str): if begin in text", "= text.find(end) if end in text else None #return text[start:stop] #def between_markers(text: str,", "return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and end not in text: return", "= between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #== \"My new site\", \"HTML\" c =", "all' f = between_markers('No <hi>', '>', '<') #== '', 'Wrong direction' print(a, b,", "'>', '<') #== '', 'Wrong direction' print(a, b, c, d, e, f, sep=\"\\n\")", "if begin in text and end in text: if text.find(begin)>text.find(end): return \"\" else:", "+ len(begin) if begin in text else None #stop = text.find(end) if end", "a = between_markers('What is >apple<', '>', '<') #== \"apple\", \"One sym\" b =", "and end in text: if text.find(begin)>text.find(end): return \"\" else: return text[text.find(begin)+len(begin):text.find(end)] elif begin", "'<') #== \"apple\", \"One sym\" b = between_markers(\"<head><title>My new site</title></head>\", \"<title>\", \"</title>\") #==", "str, end: str) -> str: #if begin in text and end in text", "str, begin: str, end: str): if begin in text and end in text:", "between_markers('No <hi>', '>', '<') #== '', 'Wrong direction' print(a, b, 
c, d, e,", "text.split(begin)[-1].split(end)[0] #--------------------------------------------# # Test # #--------------------------------------------# a = between_markers('What is >apple<', '>', '<')", "else: return text[text.find(begin)+len(begin):text.find(end)] elif begin not in text and end not in text:", "in text else None #return text[start:stop] #def between_markers(text: str, begin: str, end: str)", "d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close' e = between_markers('No" ]
[ "#!/usr/local/bin/python import spacy nlp = spacy.load('en') entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs =", "return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text)", "entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum", "for token in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict = {entity:", "doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict = {entity: 0 for entity", "[\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = [] doc = nlp(text) for token in doc:", "= nlp(text) for entity in doc.ents: entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount):", "1 return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict =", "= updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0 for k,v", "def updateAlphaLs(text): alphaLs = [] doc = nlp(text) for token in doc: if(token.is_alpha):", "entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount):", "0 for entity in entityLs} doc = nlp(text) for entity in 
doc.ents: entityDict[entity.label_]", "= updateEntityLs(text) entitySum = 0 for k,v in entityDict.items(): entitySum += v return(entitySum/wordCount)", "= [] doc = nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs", "entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = [] doc = nlp(text) for token", "nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict =", "for entity in entityLs} doc = nlp(text) for entity in doc.ents: entityDict[entity.label_] +=", "nlp = spacy.load('en') entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = [] doc =", "doc = nlp(text) for entity in doc.ents: entityDict[entity.label_] += 1 return entityDict def", "alphaLs def updateEntityLs(text): entityDict = {entity: 0 for entity in entityLs} doc =", "updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0 for k,v in", "def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0 for k,v in entityDict.items(): entitySum", "{entity: 0 for entity in entityLs} doc = nlp(text) for entity in doc.ents:", "def updateEntityLs(text): entityDict = {entity: 0 for entity in entityLs} doc = nlp(text)", "alphaLs = [] doc = nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token) return", "if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict = {entity: 0 for entity in", "alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict = {entity: 0 for entity in entityLs}", "def 
alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum =", "nlp(text) for entity in doc.ents: entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount): alphaLs", "in doc.ents: entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount)", "= spacy.load('en') entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = [] doc = nlp(text)", "spacy.load('en') entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = [] doc = nlp(text) for", "in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict = {entity: 0 for", "import spacy nlp = spacy.load('en') entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = []", "entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0 for k,v in entityDict.items(): entitySum +=", "= {entity: 0 for entity in entityLs} doc = nlp(text) for entity in", "+= 1 return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict", "= [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] 
def updateAlphaLs(text): alphaLs = [] doc = nlp(text) for token in", "doc.ents: entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def", "entityDict = updateEntityLs(text) entitySum = 0 for k,v in entityDict.items(): entitySum += v", "updateAlphaLs(text): alphaLs = [] doc = nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token)", "[] doc = nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def", "entity in entityLs} doc = nlp(text) for entity in doc.ents: entityDict[entity.label_] += 1", "entityLs} doc = nlp(text) for entity in doc.ents: entityDict[entity.label_] += 1 return entityDict", "= nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict", "return alphaLs def updateEntityLs(text): entityDict = {entity: 0 for entity in entityLs} doc", "alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0 for", "spacy nlp = spacy.load('en') entityLs = [\"ORG\",\"PERSON\",\"DATE\",\"TIME\",\"MONEY\",\"PERCENT\",\"FAC\",\"GPE\",\"NORP\",\"WORK_OF_ART\",\"QUANTITY\",\"LOC\",\"PRODUCT\",\"EVENT\",\"LAW\",\"LANGUAGE\",\"ORDINAL\",\"CARDINAL\"] def updateAlphaLs(text): alphaLs = [] doc", "doc = nlp(text) for token in doc: if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text):", "in entityLs} doc = nlp(text) for entity in doc.ents: entityDict[entity.label_] += 1 return", "return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0 for k,v in entityDict.items():", "entityDict = {entity: 0 for entity in entityLs} doc = nlp(text) for entity", "entity in doc.ents: entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text)", "token in doc: 
if(token.is_alpha): alphaLs.append(token) return alphaLs def updateEntityLs(text): entityDict = {entity: 0", "updateEntityLs(text): entityDict = {entity: 0 for entity in entityLs} doc = nlp(text) for", "for entity in doc.ents: entityDict[entity.label_] += 1 return entityDict def alphaPercentage(text,wordCount): alphaLs =", "alphaPercentage(text,wordCount): alphaLs = updateAlphaLs(text) return(len(alphaLs)/wordCount) def entityPercentage(text,wordCount): entityDict = updateEntityLs(text) entitySum = 0" ]
[ "models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin',", "django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True", "on 2022-05-22 00:16 from django.conf import settings from django.db import migrations, models import", "= True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the space works", "migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4,", "<filename>trello/space_works/migrations/0001_initial.py # Generated by Django 3.2.13 on 2022-05-22 00:16 from django.conf import settings", "models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='membership', name='space_work',", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid", "options={ 'ordering': ('-created', '-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True,", "its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to interact", "], options={ 'abstract': False, }, ), migrations.AddField( 
model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField(", "models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ], options={", "initial = True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "space works data and manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active", "verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40,", "models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)),", "django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ ('users', '0001_initial'),", "in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')),", "at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update", 
"models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name',", "00:16 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import", "import uuid class Migration(migrations.Migration): initial = True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created',", "models.BooleanField(default=True, help_text='Only active users are allowed to interact in the circle', verbose_name='active status')),", "models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [", "), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created", "False, }, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('uuid', 
models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the", "'0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),", "can update the space works data and manage its members', verbose_name='space_works admin')), ('is_active',", "], options={ 'ordering': ('-created', '-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork', fields=[ ('created',", "('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works", "the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ],", "('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'),", "related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract': False, },", "('-created', '-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),", "'abstract': False, }, ), migrations.CreateModel( 
name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True,", "('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract':", "model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified',", "name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False,", "2022-05-22 00:16 from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "help_text='Space Works admins can update the space works data and manage its members',", "verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can", "to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),", "to='space_works.spacework'), ), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True,", "}, ), migrations.CreateModel( name='SpaceWork', 
fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),", "models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={", "= [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),", "admins can update the space works data and manage its members', verbose_name='space_works admin')),", "models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ),", "dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership', fields=[", "active users are allowed to interact in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True,", "admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to interact in the circle',", "data and manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are", "allowed to interact in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)),", "model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), 
migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List',", "settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial", "unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField(", "django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class", "status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created',", "# Generated by Django 3.2.13 on 2022-05-22 00:16 from django.conf import settings from", "'abstract': False, }, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', name='user',", "import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration):", "models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'),", "name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', 
models.UUIDField(default=uuid.uuid4, editable=False,", "users are allowed to interact in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,", "to interact in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile',", "to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ),", "operations = [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified", "('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)),", "serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the space works data and", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork',", "migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4,", "), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', 
name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ),", "('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ],", "members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to interact in", "at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description',", "import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ ('users',", "models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the space", "), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid',", "the space works data and manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only", "class Migration(migrations.Migration): initial = True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to=settings.AUTH_USER_MODEL), ), migrations.CreateModel(", "migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies =", "[ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True,", "primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the space works data", "on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract': False,", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract':", "3.2.13 on 2022-05-22 00:16 from django.conf import settings from django.db import migrations, models", "models.BooleanField(default=False, help_text='Space Works admins can update the space works data and manage its", "True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified", "name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', 
name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[", "editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members',", "), migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid',", "('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ], options={ 'abstract': False, }, ),", "help_text='Only active users are allowed to interact in the circle', verbose_name='active status')), ('invited_by',", "models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space Works admins", "}, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),", "editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ], options={ 'abstract':", "models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ], options={ 'abstract': False, }, ), ]", "serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ], options={ 'abstract': False, },", "to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork', fields=[", "models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work',", "Django 3.2.13 on 2022-05-22 00:16 from django.conf import settings from django.db import migrations,", "] operations = [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True,", "Migration(migrations.Migration): initial = True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "Works admins can update the space works data and manage its members', verbose_name='space_works", "'-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified',", "('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public',", "('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the space works data and manage", "('is_public', 
models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='membership',", "'ordering': ('-created', '-modified'), 'abstract': False, }, ), migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created", "name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False,", "name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True,", "to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract': False, }, ),", "models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False,", "migrations.AddField( model_name='membership', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.CreateModel( name='List', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),", "Generated by Django 3.2.13 on 2022-05-22 00:16 from django.conf import settings from django.db", "serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, 
unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)),", "verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to interact in the", "verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering':", "update the space works data and manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True,", "models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name',", "('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created", "are allowed to interact in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by',", "by Django 3.2.13 on 2022-05-22 00:16 from django.conf import settings from django.db import", "verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False,", "verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, 
primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)),", "('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to interact in the circle', verbose_name='active", "fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True,", "[ migrations.CreateModel( name='Membership', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid',", "verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "works data and manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users", "options={ 'abstract': False, }, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'), ), migrations.AddField( model_name='membership',", "manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to", "at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)),", "('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={ 'ordering': ('-created', '-modified'), 'abstract': False, }, 
), migrations.CreateModel(", "at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('is_admin', models.BooleanField(default=False, help_text='Space", "uuid class Migration(migrations.Migration): initial = True dependencies = [ ('users', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership',", "and manage its members', verbose_name='space_works admin')), ('is_active', models.BooleanField(default=True, help_text='Only active users are allowed", "False, }, ), migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified", "migrations.CreateModel( name='SpaceWork', fields=[ ('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4,", "at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')),", "('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='membership', name='space_work', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "= [ ('users', 
'0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Membership', fields=[ ('created',", "primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ], options={ 'abstract': False,", "('slug_name', models.SlugField(max_length=40, unique=True)), ('is_public', models.BooleanField(default=True)), ('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, },", "interact in the circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')), ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),", "('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('description', models.TextField(max_length=500)), ('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')), ],", "import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies", "circle', verbose_name='active status')), ('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')), ], options={", "from django.db 
import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial =" ]
[ "as nn import torch.nn.functional as F import copy class MT(nn.Module): def __init__(self, model,", "y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(),", "y as input of forward function is detached model.update_batch_stats(True) if self.loss == 'mse':", "detached model.update_batch_stats(True) if self.loss == 'mse': if self.scl_weight is not None: target_weights =", "return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1)", "ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor = ema_factor self.global_step =", "mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld': return", "import torch import torch.nn as nn import torch.nn.functional as F import copy class", "== 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{} is unknown", "return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(),", "= loss # scl weight if scl_weight is not None: self.scl_weight = scl_weight", "scl_weight is not None: self.scl_weight = scl_weight else: self.scl_weight = None def forward(self,", "ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1 - 1", "loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor = ema_factor self.global_step = 0", "function is detached model.update_batch_stats(True) if self.loss == 
'mse': if self.scl_weight is not None:", "'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{} is unknown loss", "target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) *", "model, mask): self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False) y = model(x) #", "torch import torch.nn as nn import torch.nn.functional as F import copy class MT(nn.Module):", "if scl_weight is not None: self.scl_weight = scl_weight else: self.scl_weight = None def", "= self.model(x) model.update_batch_stats(False) y = model(x) # recompute y since y as input", "= torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean()", "not None: self.scl_weight = scl_weight else: self.scl_weight = None def forward(self, x, y,", "recompute y since y as input of forward function is detached model.update_batch_stats(True) if", "raise ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1 -", "mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise", "of forward function is detached model.update_batch_stats(True) if self.loss == 'mse': if self.scl_weight is", "= min(1 - 1 / (self.global_step+1), self.ema_factor) for ema_p, p in zip(self.model.parameters(), parameters):", "ema_p, p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data + (1 -", "y since y as input of forward function is detached model.update_batch_stats(True) if self.loss", "y = model(x) # recompute y since y as input of forward function", "1 / (self.global_step+1), self.ema_factor) 
for ema_p, p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor", "super().__init__() self.model = model self.model.train() self.ema_factor = ema_factor self.global_step = 0 self.loss =", "copy class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model", "= scl_weight else: self.scl_weight = None def forward(self, x, y, model, mask): self.global_step", "in zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data + (1 - ema_factor) *", "# recompute y since y as input of forward function is detached model.update_batch_stats(True)", "scl_weight else: self.scl_weight = None def forward(self, x, y, model, mask): self.global_step +=", "self.global_step = 0 self.loss = loss # scl weight if scl_weight is not", "if self.loss == 'mse': if self.scl_weight is not None: target_weights = torch.stack(list(map(lambda t:", "(target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) *", "# scl weight if scl_weight is not None: self.scl_weight = scl_weight else: self.scl_weight", "= None def forward(self, x, y, model, mask): self.global_step += 1 y_hat =", "y, model, mask): self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False) y = model(x)", "else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(),", "nn import torch.nn.functional as F import copy class MT(nn.Module): def __init__(self, model, ema_factor,", "torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else:", "- 1 / (self.global_step+1), self.ema_factor) for ema_p, p in zip(self.model.parameters(), 
parameters): ema_p.data =", "(F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{} is unknown loss type\".format(self.loss)) def", "F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif", "= 0 self.loss = loss # scl weight if scl_weight is not None:", "import torch.nn as nn import torch.nn.functional as F import copy class MT(nn.Module): def", "is not None: self.scl_weight = scl_weight else: self.scl_weight = None def forward(self, x,", "self.scl_weight = None def forward(self, x, y, model, mask): self.global_step += 1 y_hat", "model self.model.train() self.ema_factor = ema_factor self.global_step = 0 self.loss = loss # scl", "weight if scl_weight is not None: self.scl_weight = scl_weight else: self.scl_weight = None", "self.scl_weight = scl_weight else: self.scl_weight = None def forward(self, x, y, model, mask):", "is unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1 - 1 /", "F import copy class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model", "as input of forward function is detached model.update_batch_stats(True) if self.loss == 'mse': if", "forward(self, x, y, model, mask): self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False) y", "0 self.loss = loss # scl weight if scl_weight is not None: self.scl_weight", "unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1 - 1 / (self.global_step+1),", "as F import copy class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__()", "== 'mse': if self.scl_weight is not None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1])))", "self.loss = loss 
# scl weight if scl_weight is not None: self.scl_weight =", "zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data + (1 - ema_factor) * p.data", "mask): self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False) y = model(x) # recompute", "self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False) y = model(x) # recompute y", "reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self, parameters):", "input of forward function is detached model.update_batch_stats(True) if self.loss == 'mse': if self.scl_weight", "else: self.scl_weight = None def forward(self, x, y, model, mask): self.global_step += 1", "self.model.train() self.ema_factor = ema_factor self.global_step = 0 self.loss = loss # scl weight", "/ (self.global_step+1), self.ema_factor) for ema_p, p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor *", "model(x) # recompute y since y as input of forward function is detached", "return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{} is unknown loss type\".format(self.loss))", "self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1),", "type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor) for", "None: self.scl_weight = scl_weight else: self.scl_weight = None def forward(self, x, y, model,", "import torch.nn.functional as F import copy class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse',", "moving_average(self, parameters): ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor) for ema_p, p", "self.model(x) model.update_batch_stats(False) y = model(x) # recompute y since y as input of", "since 
y as input of forward function is detached model.update_batch_stats(True) if self.loss ==", "None def forward(self, x, y, model, mask): self.global_step += 1 y_hat = self.model(x)", "y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss", "def moving_average(self, parameters): ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor) for ema_p,", "def forward(self, x, y, model, mask): self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False)", "elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{}", "forward function is detached model.update_batch_stats(True) if self.loss == 'mse': if self.scl_weight is not", "torch.nn.functional as F import copy class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None):", "self.model = model self.model.train() self.ema_factor = ema_factor self.global_step = 0 self.loss = loss", "= model self.model.train() self.ema_factor = ema_factor self.global_step = 0 self.loss = loss #", "+= 1 y_hat = self.model(x) model.update_batch_stats(False) y = model(x) # recompute y since", "class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train()", "= ema_factor self.global_step = 0 self.loss = loss # scl weight if scl_weight", "self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise ValueError(\"{} is", "loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor)", "= model(x) # recompute y since y as input of forward function is", "y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else: raise 
ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self,", "self.scl_weight is not None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights *", "ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor) for ema_p, p in zip(self.model.parameters(),", "torch.nn as nn import torch.nn.functional as F import copy class MT(nn.Module): def __init__(self,", "loss # scl weight if scl_weight is not None: self.scl_weight = scl_weight else:", "import copy class MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model =", "'mse': if self.scl_weight is not None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return", "t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return", "model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor = ema_factor self.global_step", "model.update_batch_stats(False) y = model(x) # recompute y since y as input of forward", "y_hat = self.model(x) model.update_batch_stats(False) y = model(x) # recompute y since y as", "for ema_p, p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data + (1", "not None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(),", "is detached model.update_batch_stats(True) if self.loss == 'mse': if self.scl_weight is not None: target_weights", "model.update_batch_stats(True) if self.loss == 'mse': if self.scl_weight is not None: target_weights = torch.stack(list(map(lambda", "* mask).mean() else: raise ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor", 
"reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean()", "(F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1)", "is not None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1),", "* mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld':", "y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) *", "* mask).mean() elif self.loss == 'kld': return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction=\"none\").sum(1) * mask).mean() else:", "mask).mean() else: raise ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor =", "__init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor = ema_factor", "self.ema_factor = ema_factor self.global_step = 0 self.loss = loss # scl weight if", "scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor = ema_factor self.global_step = 0 self.loss", "scl weight if scl_weight is not None: self.scl_weight = scl_weight else: self.scl_weight =", "MT(nn.Module): def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor", "def __init__(self, model, ema_factor, loss='mse', scl_weight=None): super().__init__() self.model = model self.model.train() self.ema_factor =", "if self.scl_weight is not None: 
target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights", "min(1 - 1 / (self.global_step+1), self.ema_factor) for ema_p, p in zip(self.model.parameters(), parameters): ema_p.data", "ema_factor self.global_step = 0 self.loss = loss # scl weight if scl_weight is", "else: raise ValueError(\"{} is unknown loss type\".format(self.loss)) def moving_average(self, parameters): ema_factor = min(1", "self.ema_factor) for ema_p, p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data +", "p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data + (1 - ema_factor)", "None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1]))) return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1)", "1 y_hat = self.model(x) model.update_batch_stats(False) y = model(x) # recompute y since y", "reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() elif self.loss ==", "(self.global_step+1), self.ema_factor) for ema_p, p in zip(self.model.parameters(), parameters): ema_p.data = ema_factor * ema_p.data", "* F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean() else: return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction=\"none\").mean(1) * mask).mean()", "self.loss == 'mse': if self.scl_weight is not None: target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data],", "parameters): ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor) for ema_p, p in", "x, y, model, mask): self.global_step += 1 y_hat = self.model(x) model.update_batch_stats(False) y =" ]
[]
[ "Recursive part of the recreate application \"\"\" def __init__(self, file_path): \"\"\" Init class", "file datetime created to metadata.creation_time in all files in folder Copyright (c) 2021", "\"\"\" Init class variables \"\"\" self.files = [] self.error = None if os.path.isfile(file_path):", "created to metadata.creation_time in all files in folder Copyright (c) 2021 <NAME> (<EMAIL>)", "def __init__(self, file_path): \"\"\" Init class variables \"\"\" self.files = [] self.error =", "self.files = [] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir,", "(<EMAIL>) \"\"\" import io import sys import os from recreate.common import myfile class", "recreate.common import myfile class MyFolder: \"\"\" Recursive part of the recreate application \"\"\"", "import myfile class MyFolder: \"\"\" Recursive part of the recreate application \"\"\" def", "part of the recreate application \"\"\" def __init__(self, file_path): \"\"\" Init class variables", "files in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else: self.error = file_path", "in all files in folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import io", "to metadata.creation_time in all files in folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\"", "Init class variables \"\"\" self.files = [] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path))", "metadata.creation_time in all files in folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import", "os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file)))", "for subdir, dirs, files in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else:", "file_path): \"\"\" Init class variables \"\"\" self.files = [] self.error = None if", "import io import sys 
import os from recreate.common import myfile class MyFolder: \"\"\"", "MyFolder: set file datetime created to metadata.creation_time in all files in folder Copyright", "\"\"\" Recursive part of the recreate application \"\"\" def __init__(self, file_path): \"\"\" Init", "all files in folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import io import", "files in folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import io import sys", "variables \"\"\" self.files = [] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path):", "self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files in", "dirs, files in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else: self.error =", "None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path): for", "import os from recreate.common import myfile class MyFolder: \"\"\" Recursive part of the", "MyFolder: \"\"\" Recursive part of the recreate application \"\"\" def __init__(self, file_path): \"\"\"", "__init__(self, file_path): \"\"\" Init class variables \"\"\" self.files = [] self.error = None", "sys import os from recreate.common import myfile class MyFolder: \"\"\" Recursive part of", "elif os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir,", "in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else: self.error = file_path +", "for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else: self.error = file_path + 'Invalid path!'", "\"\"\" def __init__(self, file_path): \"\"\" Init class variables \"\"\" self.files = [] self.error", 
"<reponame>majo48/recreate-git<gh_stars>0 \"\"\" Class MyFolder: set file datetime created to metadata.creation_time in all files", "from recreate.common import myfile class MyFolder: \"\"\" Recursive part of the recreate application", "self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path): for file in files:", "= [] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs,", "os from recreate.common import myfile class MyFolder: \"\"\" Recursive part of the recreate", "class variables \"\"\" self.files = [] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif", "Class MyFolder: set file datetime created to metadata.creation_time in all files in folder", "= None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path):", "\"\"\" import io import sys import os from recreate.common import myfile class MyFolder:", "subdir, dirs, files in os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else: self.error", "if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path): for file", "in folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import io import sys import", "os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files in os.walk(file_path): for file in", "datetime created to metadata.creation_time in all files in folder Copyright (c) 2021 <NAME>", "\"\"\" Class MyFolder: set file datetime created to metadata.creation_time in all files in", "io import sys import os from recreate.common import myfile class MyFolder: \"\"\" Recursive", "class MyFolder: \"\"\" Recursive part of the 
recreate application \"\"\" def __init__(self, file_path):", "\"\"\" self.files = [] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for", "Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import io import sys import os from", "import sys import os from recreate.common import myfile class MyFolder: \"\"\" Recursive part", "set file datetime created to metadata.creation_time in all files in folder Copyright (c)", "(c) 2021 <NAME> (<EMAIL>) \"\"\" import io import sys import os from recreate.common", "of the recreate application \"\"\" def __init__(self, file_path): \"\"\" Init class variables \"\"\"", "the recreate application \"\"\" def __init__(self, file_path): \"\"\" Init class variables \"\"\" self.files", "application \"\"\" def __init__(self, file_path): \"\"\" Init class variables \"\"\" self.files = []", "recreate application \"\"\" def __init__(self, file_path): \"\"\" Init class variables \"\"\" self.files =", "[] self.error = None if os.path.isfile(file_path): self.files.append(myfile.MyFile(file_path)) elif os.path.isdir(file_path): for subdir, dirs, files", "os.walk(file_path): for file in files: self.files.append(myfile.MyFile(os.path.join(subdir, file))) else: self.error = file_path + 'Invalid", "myfile class MyFolder: \"\"\" Recursive part of the recreate application \"\"\" def __init__(self,", "folder Copyright (c) 2021 <NAME> (<EMAIL>) \"\"\" import io import sys import os", "2021 <NAME> (<EMAIL>) \"\"\" import io import sys import os from recreate.common import", "<NAME> (<EMAIL>) \"\"\" import io import sys import os from recreate.common import myfile" ]
[ "2019-05-21 06:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ]", "dependencies = [ ] operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)),", "= [ ] operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title',", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations = [", "class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id',", "operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content', models.TextField()),", "] operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content',", "2.0 on 2019-05-21 06:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel(", "fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content', models.TextField()), ('publish_date', models.DateTimeField(auto_now=True)), ], ), ]", "06:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations", "# Generated by Django 2.0 on 2019-05-21 06:57 from django.db import migrations, models", "Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True,", "= [ migrations.CreateModel( name='Summary', 
fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content', models.TextField()), ('publish_date',", "by Django 2.0 on 2019-05-21 06:57 from django.db import migrations, models class Migration(migrations.Migration):", "on 2019-05-21 06:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "Django 2.0 on 2019-05-21 06:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "[ ] operations = [ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()),", "migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content', models.TextField()), ('publish_date', models.DateTimeField(auto_now=True)), ],", "models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Summary', fields=[", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations =", "name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content', models.TextField()), ('publish_date', models.DateTimeField(auto_now=True)), ], ),", "Generated by Django 2.0 on 2019-05-21 06:57 from django.db import migrations, models class", "[ migrations.CreateModel( name='Summary', fields=[ ('summary_id', models.AutoField(primary_key=True, serialize=False)), ('summary_title', models.TextField()), ('summary_content', models.TextField()), ('publish_date', models.DateTimeField(auto_now=True)),", "migrations, models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Summary'," ]
[ "as cms process = cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './'", "process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3')", "cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon", "process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3')", "record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype", "= cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) )", "record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames =", "timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') ))", "cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root',", "'./' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype", "= cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype =", "= cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( 
'/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root'", ")) ) process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) )", "cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet(", "tag = cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet", "process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'),", "cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet", "'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon =", "process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input =", "process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' 
process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )", "cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'),", "= cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet =", "= cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype =", "toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss =", "FWCore.ParameterSet.Config as cms process = cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath =", ")) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record", "cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames", "record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype", ") process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record =", "tag = cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet", "= cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\",", ")) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", 
process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record", "tag = cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root',", "= cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'),", "= cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\",", "= cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) )", "cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'),", "= cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet =", "= cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag", "process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'),", "toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source =", "cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) 
) process.demo = cms.EDAnalyzer('RiovTest') process.p", "process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype =", "process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1)", ") process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record =", "= cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet =", "= cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag", "timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') ))", "cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp", "= cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet(", "process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3')", "cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon,", "process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), 
tag = cms.string('Imon_v3')", "= cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet(", "= cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring(", "= cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag", "cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record", "cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag =", "toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon =", "cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss", "cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet(", "cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag =", "tag = cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet", "= 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon", "cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag =", "= cms.string('timestamp'), toGet = 
cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) )", "cms.string('RPCObTempRcd'), tag = cms.string('Temp_v3') )) ) process.rpvss = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'),", "= cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\",", ") process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record =", "cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon,", "= './' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon,", "= cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObTempRcd'), tag", "fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) ) process.demo = cms.EDAnalyzer('RiovTest') process.p =", "= cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) )", "process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'),", "cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag =", "cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = 
cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') )) ) process.source", "= cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype =", "process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) ) process.demo =", "record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') )) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype", "= cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\",", "cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input", "process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\",", "= cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) ) process.demo = cms.EDAnalyzer('RiovTest')", "toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp =", "cms.string('Imon_v3') 
)) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet(", "process = cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents =", "<filename>CondTools/RPC/test/riovtest_cfg.py import FWCore.ParameterSet.Config as cms process = cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db'", ")) ) process.rvmon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record", "cms process = cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath = './' process.maxEvents", "process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'),", "timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObPVSSmapRcd'), tag = cms.string('PVSS_v3') ))", "timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record = cms.string('RPCObImonRcd'), tag = cms.string('Imon_v3') ))", "cms.string('PVSS_v3') )) ) process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', 
'/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' )", ") process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet = cms.VPSet(cms.PSet( record =", "cms.VPSet(cms.PSet( record = cms.string('RPCObVmonRcd'), tag = cms.string('Vmon_v3') )) ) process.rtemp = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon,", ") process.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) ) process.demo", "= cms.untracked.vstring( '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root', '/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root' ) ) process.demo = cms.EDAnalyzer('RiovTest') process.p = cms.Path(process.demo)", "input = cms.untracked.int32(-1) ) process.rimon = cms.ESSource(\"PoolDBESSource\", process.CondDBCommon, timetype = cms.string('timestamp'), toGet =", "import FWCore.ParameterSet.Config as cms process = cms.Process(\"Demo\") process.load(\"CondCore.DBCommon.CondDBCommon_cfi\") process.load(\"FWCore.MessageService.MessageLogger_cfi\") process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db' process.CondDBCommon.DBParameters.authenticationPath" ]
[ "= a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q =", "= open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx = line.find('?')", "= q.translate(None, string.punctuation) q = q+\"?\" output_line = q + \"\\t\" + a", "= line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip() a =", "= q + \"\\t\" + a for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i]", "+ \"\\t\" + a for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else:", "in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else: line = line.translate(None, string.punctuation) output.write(line) print(\"done\")", "= a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q = q+\"?\" output_line = q", "\"\\t\" + a for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else: line", "tmp = line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\" \")", "if \"?\" in line: idx = line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1]", "q.translate(None, string.punctuation) q = q+\"?\" output_line = q + \"\\t\" + a for", "in line: idx = line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1] a =", "a = tmp[1].strip() a = a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q =", "q = q.translate(None, string.punctuation) q = q+\"?\" output_line = q + \"\\t\" +", "q+\"?\" output_line = q + \"\\t\" + a for i in range(2,len(tmp)): output_line", "idx = line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip() a", "import sys mode = sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if", "import os import string import sys mode = sys.argv[1] output = 
open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for", "for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx = line.find('?') tmp =", "= line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\" \") a = a.translate(None, string.punctuation)", "line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q", "in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx = line.find('?') tmp = line[idx+1:].split('\\t') q", "\") a = a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q = q+\"?\" output_line", "open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx = line.find('?') tmp", "+ a for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else: line =", "= sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line:", "a = a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q", "q + \"\\t\" + a for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line)", "enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx = line.find('?') tmp = line[idx+1:].split('\\t') q =", "line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\"", "= tmp[1].strip() a = a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q = q.translate(None,", "a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q = q+\"?\"", "a = a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q = q+\"?\" output_line =", "a.translate(None, string.punctuation) q = q.translate(None, string.punctuation) q = q+\"?\" output_line = q +", "i in 
range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else: line = line.translate(None, string.punctuation) output.write(line)", "sys mode = sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\"", "q = line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\" \") a = a.translate(None,", "output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx =", "q = q+\"?\" output_line = q + \"\\t\" + a for i in", "for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else: line = line.translate(None, string.punctuation)", "tmp[1].strip() a = a.replace(\",\",\" \") a = a.translate(None, string.punctuation) q = q.translate(None, string.punctuation)", "output_line = q + \"\\t\" + a for i in range(2,len(tmp)): output_line +=", "line: idx = line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip()", "string.punctuation) q = q.translate(None, string.punctuation) q = q+\"?\" output_line = q + \"\\t\"", "os import string import sys mode = sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line", "sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx", "mode = sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in", "string.punctuation) q = q+\"?\" output_line = q + \"\\t\" + a for i", "\"?\" in line: idx = line.find('?') tmp = line[idx+1:].split('\\t') q = line[:idx+1] a", "line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\" \") a =", "string import sys mode = sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in 
enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")):", "import string import sys mode = sys.argv[1] output = open(\"punc_removed_\"+str(mode)+\".txt\",\"w\") for i,line in", "= q+\"?\" output_line = q + \"\\t\" + a for i in range(2,len(tmp)):", "a for i in range(2,len(tmp)): output_line += \"\\t\"+tmp[i] output.write(output_line) else: line = line.translate(None,", "= line[idx+1:].split('\\t') q = line[:idx+1] a = tmp[1].strip() a = a.replace(\",\",\" \") a", "i,line in enumerate(open(\"cleaned_squad_\"+str(mode)+\".txt\")): if \"?\" in line: idx = line.find('?') tmp = line[idx+1:].split('\\t')" ]
[ "as json_file: json_data = json.load(json_file) self.data = json_data self.keys = list(self.data.keys()) self.compile =", "open(path, encoding='UTF8') as json_file: json_data = json.load(json_file) self.data = json_data self.keys = list(self.data.keys())", "list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group, idx in sorted([(m.group(),", "m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line = re.sub(group, self.data[self.keys[idx]],", "in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line = re.sub(group, self.data[self.keys[idx]], line) return line", "def __init__(self, path: Path): with open(path, encoding='UTF8') as json_file: json_data = json.load(json_file) self.data", "pathlib import Path import json import re class ReservedWords: def __init__(self, path: Path):", "encoding='UTF8') as json_file: json_data = json.load(json_file) self.data = json_data self.keys = list(self.data.keys()) self.compile", "re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m", "class ReservedWords: def __init__(self, path: Path): with open(path, encoding='UTF8') as json_file: json_data =", "from pathlib import Path import json import re class ReservedWords: def __init__(self, path:", "line: str): for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda", "= re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group, idx in sorted([(m.group(), m.groups().index(m.group())) for", "translate(self, line: str): for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)],", "for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0],", 
"self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group, idx in sorted([(m.group(), m.groups().index(m.group()))", "str): for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x:", "self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group, idx", "m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line = re.sub(group, self.data[self.keys[idx]], line) return", "import re class ReservedWords: def __init__(self, path: Path): with open(path, encoding='UTF8') as json_file:", "= json.load(json_file) self.data = json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self,", "json import re class ReservedWords: def __init__(self, path: Path): with open(path, encoding='UTF8') as", "json_data = json.load(json_file) self.data = json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def", "__init__(self, path: Path): with open(path, encoding='UTF8') as json_file: json_data = json.load(json_file) self.data =", "json.load(json_file) self.data = json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line:", "for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line = re.sub(group, self.data[self.keys[idx]], line)", "json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group,", "path: Path): with open(path, encoding='UTF8') as json_file: json_data = json.load(json_file) self.data = json_data", "group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True):", "Path import json import re class 
ReservedWords: def __init__(self, path: Path): with open(path,", "= list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for group, idx in", "json_file: json_data = json.load(json_file) self.data = json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\")", "in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line =", "sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line = re.sub(group,", "idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True): line", "re class ReservedWords: def __init__(self, path: Path): with open(path, encoding='UTF8') as json_file: json_data", "= json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str): for", "import json import re class ReservedWords: def __init__(self, path: Path): with open(path, encoding='UTF8')", "<filename>docs_translate/reserved_word.py<gh_stars>0 from pathlib import Path import json import re class ReservedWords: def __init__(self,", "import Path import json import re class ReservedWords: def __init__(self, path: Path): with", "with open(path, encoding='UTF8') as json_file: json_data = json.load(json_file) self.data = json_data self.keys =", "ReservedWords: def __init__(self, path: Path): with open(path, encoding='UTF8') as json_file: json_data = json.load(json_file)", "self.data = json_data self.keys = list(self.data.keys()) self.compile = re.compile(\"(\"+\")|(\".join(self.keys)+\")\") def translate(self, line: str):", "def translate(self, line: str): for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in", "Path): with open(path, encoding='UTF8') as json_file: json_data = 
json.load(json_file) self.data = json_data self.keys" ]
[ "one class --> A class is a blueprint for creating objects --> An", "the average grade of the student \"\"\" def main(): student = { 'name':", "objects can be created from one class --> A class is a blueprint", "print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade", "instance of the class \"\"\" class Student: \"\"\" a class to represent a", "def main(): student = { 'name': '<NAME>', 'grades': [90, 100, 98, 97, 96]", "be created from one class --> A class is a blueprint for creating", "'grades': [90, 100, 98, 97, 96] } def calculate_average(grades): average = grades /", "a students with a name and grades\"\"\" def __init__(self, names, grades): # The", "oriented programing allow programs to imitate the objects in the real world \"\"\"", "student..it models a students with a name and grades\"\"\" def __init__(self, names, grades):", "can be created from one class --> A class is a blueprint for", "Multiple independent objects can be created from one class --> A class is", "class is a blueprint for creating objects --> An object is a class", "class student self.name = names self.grade = grades def calculate_average(self): \"\"\"A function to", "a blueprint for creating objects --> An object is a class variable or", "the objects in the real world \"\"\" \"\"\" Create a student dict with", "the class \"\"\" class Student: \"\"\" a class to represent a student..it models", "a student..it models a students with a name and grades\"\"\" def __init__(self, names,", "a dictionary, now use a class to represent the student. 
--> Dictionaries cannot", "class \"\"\" class Student: \"\"\" a class to represent a student..it models a", "grades\"\"\" average = sum(self.grade) / len(self.grade) return average # Objects of the student", "objects in the real world \"\"\" \"\"\" Create a student dict with name", "student self.name = names self.grade = grades def calculate_average(self): \"\"\"A function to calculate", "class variable or an instance of the class \"\"\" class Student: \"\"\" a", "variable or an instance of the class \"\"\" class Student: \"\"\" a class", "98, 78, 89, 96]) # Accessing the fields in the objects print('') print(student_one.name)", "their data but classes can --> Multiple independent objects can be created from", "names self.grade = grades def calculate_average(self): \"\"\"A function to calculate the average of", "from one class --> A class is a blueprint for creating objects -->", "'name': '<NAME>', 'grades': [90, 100, 98, 97, 96] } def calculate_average(grades): average =", "self.name = names self.grade = grades def calculate_average(self): \"\"\"A function to calculate the", "The __init__() function is a constructor for the class student self.name = names", "A class is a blueprint for creating objects --> An object is a", "Student: \"\"\" a class to represent a student..it models a students with a", "--> A class is a blueprint for creating objects --> An object is", "\"\"\" Create a student dict with name and grades and write a function", "return average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a dictionary, now", "def __init__(self, names, grades): # The __init__() function is a constructor for the", "world \"\"\" \"\"\" Create a student dict with name and grades and write", "dictionary, now use a class to represent the student. 
--> Dictionaries cannot contain", "act on their data but classes can --> Multiple independent objects can be", "with a name and grades\"\"\" def __init__(self, names, grades): # The __init__() function", "names, grades): # The __init__() function is a constructor for the class student", "the class student self.name = names self.grade = grades def calculate_average(self): \"\"\"A function", "now use a class to represent the student. --> Dictionaries cannot contain that", "a class to represent a student..it models a students with a name and", "independent objects can be created from one class --> A class is a", "average of a students grades\"\"\" average = sum(self.grade) / len(self.grade) return average #", "= Student('<NAME>', [90, 96, 98, 97, 100]) student_two = Student('<NAME>', [89, 98, 78,", "{student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade is {student_two.calculate_average()}') if __name__ == '__main__':", "= sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a dictionary, now use a class", "97, 96] } def calculate_average(grades): average = grades / len(student['grades']) return average total", "but classes can --> Multiple independent objects can be created from one class", "average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a dictionary, now use", "class to represent a student..it models a students with a name and grades\"\"\"", "grades\"\"\" def __init__(self, names, grades): # The __init__() function is a constructor for", "name and grades\"\"\" def __init__(self, names, grades): # The __init__() function is a", "class Student: \"\"\" a class to represent a student..it models a students with", "print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s 
average grade is", "is a blueprint for creating objects --> An object is a class variable", "len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a dictionary,", "allow programs to imitate the objects in the real world \"\"\" \"\"\" Create", "calculate the average grade of the student \"\"\" def main(): student = {", "Instead of using a dictionary, now use a class to represent the student.", "created from one class --> A class is a blueprint for creating objects", "Create a student dict with name and grades and write a function to", "Student('<NAME>', [90, 96, 98, 97, 100]) student_two = Student('<NAME>', [89, 98, 78, 89,", "student. --> Dictionaries cannot contain that act on their data but classes can", "--> An object is a class variable or an instance of the class", "the real world \"\"\" \"\"\" Create a student dict with name and grades", "Accessing the fields in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is", "to represent a student..it models a students with a name and grades\"\"\" def", "grades and write a function to calculate the average grade of the student", "for creating objects --> An object is a class variable or an instance", "to calculate the average grade of the student \"\"\" def main(): student =", "using a dictionary, now use a class to represent the student. 
--> Dictionaries", "and grades\"\"\" def __init__(self, names, grades): # The __init__() function is a constructor", "calculate_average(grades): average = grades / len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total)) \"\"\"", "len(self.grade) return average # Objects of the student class student_one = Student('<NAME>', [90,", "average = sum(self.grade) / len(self.grade) return average # Objects of the student class", "--> Dictionaries cannot contain that act on their data but classes can -->", "def calculate_average(grades): average = grades / len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total))", "100]) student_two = Student('<NAME>', [89, 98, 78, 89, 96]) # Accessing the fields", "grades): # The __init__() function is a constructor for the class student self.name", "[90, 100, 98, 97, 96] } def calculate_average(grades): average = grades / len(student['grades'])", "97, 100]) student_two = Student('<NAME>', [89, 98, 78, 89, 96]) # Accessing the", "return average # Objects of the student class student_one = Student('<NAME>', [90, 96,", "\"\"\" \"\"\" Create a student dict with name and grades and write a", "\"\"\" def main(): student = { 'name': '<NAME>', 'grades': [90, 100, 98, 97,", "average grade of the student \"\"\" def main(): student = { 'name': '<NAME>',", "def calculate_average(self): \"\"\"A function to calculate the average of a students grades\"\"\" average", "89, 96]) # Accessing the fields in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s", "= Student('<NAME>', [89, 98, 78, 89, 96]) # Accessing the fields in the", "average = grades / len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead", "\"\"\" Object oriented programing allow programs to imitate the objects in the real", "the student. 
--> Dictionaries cannot contain that act on their data but classes", "grades / len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using", "--> Multiple independent objects can be created from one class --> A class", "of the student class student_one = Student('<NAME>', [90, 96, 98, 97, 100]) student_two", "student_one = Student('<NAME>', [90, 96, 98, 97, 100]) student_two = Student('<NAME>', [89, 98,", "for the class student self.name = names self.grade = grades def calculate_average(self): \"\"\"A", "class to represent the student. --> Dictionaries cannot contain that act on their", "100, 98, 97, 96] } def calculate_average(grades): average = grades / len(student['grades']) return", "that act on their data but classes can --> Multiple independent objects can", "objects --> An object is a class variable or an instance of the", "= grades def calculate_average(self): \"\"\"A function to calculate the average of a students", "function to calculate the average grade of the student \"\"\" def main(): student", "# Objects of the student class student_one = Student('<NAME>', [90, 96, 98, 97,", "fields in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('')", "represent a student..it models a students with a name and grades\"\"\" def __init__(self,", "the student \"\"\" def main(): student = { 'name': '<NAME>', 'grades': [90, 100,", "\"\"\" Instead of using a dictionary, now use a class to represent the", "Dictionaries cannot contain that act on their data but classes can --> Multiple", "models a students with a name and grades\"\"\" def __init__(self, names, grades): #", "An object is a class variable or an instance of the class \"\"\"", "an instance of the class \"\"\" class Student: \"\"\" a class to represent", "student \"\"\" def main(): student = { 'name': '<NAME>', 'grades': [90, 100, 
98,", "= { 'name': '<NAME>', 'grades': [90, 100, 98, 97, 96] } def calculate_average(grades):", "student dict with name and grades and write a function to calculate the", "{ 'name': '<NAME>', 'grades': [90, 100, 98, 97, 96] } def calculate_average(grades): average", "a class to represent the student. --> Dictionaries cannot contain that act on", "to imitate the objects in the real world \"\"\" \"\"\" Create a student", "Object oriented programing allow programs to imitate the objects in the real world", "with name and grades and write a function to calculate the average grade", "/ len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a", "creating objects --> An object is a class variable or an instance of", "96, 98, 97, 100]) student_two = Student('<NAME>', [89, 98, 78, 89, 96]) #", "Student('<NAME>', [89, 98, 78, 89, 96]) # Accessing the fields in the objects", "96] } def calculate_average(grades): average = grades / len(student['grades']) return average total =", "sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a dictionary, now use a class to", "object is a class variable or an instance of the class \"\"\" class", "and grades and write a function to calculate the average grade of the", "dict with name and grades and write a function to calculate the average", "students with a name and grades\"\"\" def __init__(self, names, grades): # The __init__()", "is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade is {student_two.calculate_average()}') if __name__ ==", "is a class variable or an instance of the class \"\"\" class Student:", "student_two = Student('<NAME>', [89, 98, 78, 89, 96]) # Accessing the fields in", "in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') 
print('') print(student_two.name)", "total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of using a dictionary, now use a", "average # Objects of the student class student_one = Student('<NAME>', [90, 96, 98,", "student class student_one = Student('<NAME>', [90, 96, 98, 97, 100]) student_two = Student('<NAME>',", "is a constructor for the class student self.name = names self.grade = grades", "= sum(self.grade) / len(self.grade) return average # Objects of the student class student_one", "name and grades and write a function to calculate the average grade of", "the average of a students grades\"\"\" average = sum(self.grade) / len(self.grade) return average", "print(calculate_average(total)) \"\"\" Instead of using a dictionary, now use a class to represent", "classes can --> Multiple independent objects can be created from one class -->", "data but classes can --> Multiple independent objects can be created from one", "real world \"\"\" \"\"\" Create a student dict with name and grades and", "of the class \"\"\" class Student: \"\"\" a class to represent a student..it", "sum(self.grade) / len(self.grade) return average # Objects of the student class student_one =", "# Accessing the fields in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade", "the fields in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}')", "can --> Multiple independent objects can be created from one class --> A", "Objects of the student class student_one = Student('<NAME>', [90, 96, 98, 97, 100])", "\"\"\"A function to calculate the average of a students grades\"\"\" average = sum(self.grade)", "student = { 'name': '<NAME>', 'grades': [90, 100, 98, 97, 96] } def", "class --> A class is a blueprint for creating objects --> An object", "objects print('') print(student_one.name) print(student_one.grade) 
print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s", "constructor for the class student self.name = names self.grade = grades def calculate_average(self):", "cannot contain that act on their data but classes can --> Multiple independent", "function is a constructor for the class student self.name = names self.grade =", "a constructor for the class student self.name = names self.grade = grades def", "calculate the average of a students grades\"\"\" average = sum(self.grade) / len(self.grade) return", "the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade)", "use a class to represent the student. --> Dictionaries cannot contain that act", "calculate_average(self): \"\"\"A function to calculate the average of a students grades\"\"\" average =", "a student dict with name and grades and write a function to calculate", "a class variable or an instance of the class \"\"\" class Student: \"\"\"", "78, 89, 96]) # Accessing the fields in the objects print('') print(student_one.name) print(student_one.grade)", "in the real world \"\"\" \"\"\" Create a student dict with name and", "represent the student. 
--> Dictionaries cannot contain that act on their data but", "96]) # Accessing the fields in the objects print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average", "a name and grades\"\"\" def __init__(self, names, grades): # The __init__() function is", "= grades / len(student['grades']) return average total = sum(student['grades']) print(calculate_average(total)) \"\"\" Instead of", "the student class student_one = Student('<NAME>', [90, 96, 98, 97, 100]) student_two =", "a function to calculate the average grade of the student \"\"\" def main():", "contain that act on their data but classes can --> Multiple independent objects", "self.grade = grades def calculate_average(self): \"\"\"A function to calculate the average of a", "class student_one = Student('<NAME>', [90, 96, 98, 97, 100]) student_two = Student('<NAME>', [89,", "/ len(self.grade) return average # Objects of the student class student_one = Student('<NAME>',", "write a function to calculate the average grade of the student \"\"\" def", "of using a dictionary, now use a class to represent the student. 
-->", "imitate the objects in the real world \"\"\" \"\"\" Create a student dict", "= names self.grade = grades def calculate_average(self): \"\"\"A function to calculate the average", "of a students grades\"\"\" average = sum(self.grade) / len(self.grade) return average # Objects", "grades def calculate_average(self): \"\"\"A function to calculate the average of a students grades\"\"\"", "to calculate the average of a students grades\"\"\" average = sum(self.grade) / len(self.grade)", "98, 97, 100]) student_two = Student('<NAME>', [89, 98, 78, 89, 96]) # Accessing", "main(): student = { 'name': '<NAME>', 'grades': [90, 100, 98, 97, 96] }", "print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade is {student_two.calculate_average()}')", "grade of the student \"\"\" def main(): student = { 'name': '<NAME>', 'grades':", "students grades\"\"\" average = sum(self.grade) / len(self.grade) return average # Objects of the", "on their data but classes can --> Multiple independent objects can be created", "of the student \"\"\" def main(): student = { 'name': '<NAME>', 'grades': [90,", "<reponame>jkuatdsc/Python101 \"\"\" Object oriented programing allow programs to imitate the objects in the", "grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade is {student_two.calculate_average()}') if __name__", "to represent the student. 
--> Dictionaries cannot contain that act on their data", "\"\"\" class Student: \"\"\" a class to represent a student..it models a students", "'<NAME>', 'grades': [90, 100, 98, 97, 96] } def calculate_average(grades): average = grades", "a students grades\"\"\" average = sum(self.grade) / len(self.grade) return average # Objects of", "blueprint for creating objects --> An object is a class variable or an", "# The __init__() function is a constructor for the class student self.name =", "__init__() function is a constructor for the class student self.name = names self.grade", "average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade is {student_two.calculate_average()}') if", "98, 97, 96] } def calculate_average(grades): average = grades / len(student['grades']) return average", "programs to imitate the objects in the real world \"\"\" \"\"\" Create a", "print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average grade is {student_two.calculate_average()}') if __name__ == '__main__': main()", "programing allow programs to imitate the objects in the real world \"\"\" \"\"\"", "and write a function to calculate the average grade of the student \"\"\"", "or an instance of the class \"\"\" class Student: \"\"\" a class to", "function to calculate the average of a students grades\"\"\" average = sum(self.grade) /", "__init__(self, names, grades): # The __init__() function is a constructor for the class", "print('') print(student_one.name) print(student_one.grade) print(f'{student_one.name}\\'s average grade is {student_one.calculate_average()}') print('') print(student_two.name) print(student_two.grade) print(f'{student_two.name}\\'s average", "[89, 98, 78, 89, 96]) # Accessing the fields in the objects print('')", "\"\"\" a class to represent a student..it models a students with a name", "} def calculate_average(grades): average = 
grades / len(student['grades']) return average total = sum(student['grades'])", "[90, 96, 98, 97, 100]) student_two = Student('<NAME>', [89, 98, 78, 89, 96])" ]
[ "get_embeddings(model, images): prelogits, _, _ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return", "train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt =", "config path', default='configs/config.yaml') args = parser.parse_args(argv) return args def main(): args = parse_args(sys.argv[1:])", "return embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to", "ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer", "range(embs.shape[0]): for j in range(embs.shape[0]): val = 0 for k in range(512): val", "def parse_args(argv): parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to config path',", "parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml') args = parser.parse_args(argv) return args def", "gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored", "absolute_import, division, print_function, unicode_literals import argparse import os import sys import tensorflow as", "i in range(embs.shape[0]): for j in range(embs.shape[0]): val = 0 for k in", "__future__ import absolute_import, division, print_function, unicode_literals import argparse import os import sys import", "in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in 
train_data.take(1): embs = get_embeddings(model, img)", "recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _,", "for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in train_data.take(1): embs =", "layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in train_data.take(1): embs = get_embeddings(model,", "= argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml') args =", "division, print_function, unicode_literals import argparse import os import sys import tensorflow as tf", "_ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser", "args = parser.parse_args(argv) return args def main(): args = parse_args(sys.argv[1:]) # logger.info(args) from", "_ in train_data.take(1): embs = get_embeddings(model, img) for i in range(embs.shape[0]): for j", "= model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser =", "model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train", "img) for i in range(embs.shape[0]): for j in range(embs.shape[0]): val = 0 for", "def main(): args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path)", "print_function, unicode_literals import argparse import os import sys import tensorflow as tf import", "parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml') args", 
"parser.parse_args(argv) return args def main(): args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import", "import ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _", "with open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _", "as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _ = gd.get_train_data()", "GenerateData with open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data,", "Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir", "face network') parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml') args = parser.parse_args(argv) return", "= gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()", "sys import tensorflow as tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models", "import argparse import os import sys import tensorflow as tf import yaml from", "range(512): val += embs[i][k] * embs[j][k] print(i, j, val) if __name__ == '__main__':", "MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) #", "_, _ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings 
def parse_args(argv):", "= parser.parse_args(argv) return args def main(): args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data", "MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _ = model(images, training=False) embeddings =", "for k in range(512): val += embs[i][k] * embs[j][k] print(i, j, val) if", "training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train face", "{}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in train_data.take(1):", "0 for k in range(512): val += embs[i][k] * embs[j][k] print(i, j, val)", "import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _ = model(images, training=False) embeddings", "to config path', default='configs/config.yaml') args = parser.parse_args(argv) return args def main(): args =", "import os import sys import tensorflow as tf import yaml from recognition.backbones.resnet_v1 import", "argparse import os import sys import tensorflow as tf import yaml from recognition.backbones.resnet_v1", "val += embs[i][k] * embs[j][k] print(i, j, val) if __name__ == '__main__': #", "args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path) as cfg:", "embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train face network')", "recognition.data.generate_data import GenerateData with open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd =", "tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in 
tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for", "return args def main(): args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData", "unicode_literals import argparse import os import sys import tensorflow as tf import yaml", "parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path) as cfg: config =", "axis=-1) return embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path", "os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)):", "logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader)", "train_data.take(1): embs = get_embeddings(model, img) for i in range(embs.shape[0]): for j in range(embs.shape[0]):", "ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _ =", "images): prelogits, _, _ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings", "os import sys import tensorflow as tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50", "def get_embeddings(model, images): prelogits, _, _ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1)", "* embs[j][k] print(i, j, val) if __name__ == '__main__': # logger.info(\"hello, insightface/recognition\") main()", "# logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path) as cfg: config = 
yaml.load(cfg,", "help='path to config path', default='configs/config.yaml') args = parser.parse_args(argv) return args def main(): args", "in range(embs.shape[0]): for j in range(embs.shape[0]): val = 0 for k in range(512):", "args def main(): args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData with", "yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images):", "from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _ = model(images,", "j in range(embs.shape[0]): val = 0 for k in range(512): val += embs[i][k]", "import tensorflow as tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import", "embs = get_embeddings(model, img) for i in range(embs.shape[0]): for j in range(embs.shape[0]): val", "= parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path) as cfg: config", "embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to config", "from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits,", "= yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50,", "gd = GenerateData(config) train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir =", "in range(512): val += embs[i][k] * embs[j][k] print(i, j, val) if __name__ ==", "ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # 
for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img,", "= MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir)))", "in train_data.take(1): embs = get_embeddings(model, img) for i in range(embs.shape[0]): for j in", "type=str, help='path to config path', default='configs/config.yaml') args = parser.parse_args(argv) return args def main():", "k in range(512): val += embs[i][k] * embs[j][k] print(i, j, val) if __name__", "import absolute_import, division, print_function, unicode_literals import argparse import os import sys import tensorflow", "embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for", "model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from", "path', default='configs/config.yaml') args = parser.parse_args(argv) return args def main(): args = parse_args(sys.argv[1:]) #", "print(layer) for img, _ in train_data.take(1): embs = get_embeddings(model, img) for i in", "import sys import tensorflow as tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from", "open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _ =", "tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel 
tf.enable_eager_execution() def", "GenerateData(config) train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt", "print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _", "# for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in train_data.take(1): embs", "recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _ = model(images, training=False)", "val = 0 for k in range(512): val += embs[i][k] * embs[j][k] print(i,", "tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str,", "= get_embeddings(model, img) for i in range(embs.shape[0]): for j in range(embs.shape[0]): val =", "= tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer)", "import GenerateData with open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config)", "cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _ = gd.get_train_data() model", "ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): #", "= tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def parse_args(argv): parser = 
argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path',", "import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution() def get_embeddings(model,", "parse_args(argv): parser = argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml')", "= os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone) ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial() print(\"Restored from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in", "prelogits, _, _ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits, axis=-1) return embeddings def", "default='configs/config.yaml') args = parser.parse_args(argv) return args def main(): args = parse_args(sys.argv[1:]) # logger.info(args)", "get_embeddings(model, img) for i in range(embs.shape[0]): for j in range(embs.shape[0]): val = 0", "for j in range(embs.shape[0]): val = 0 for k in range(512): val +=", "from recognition.data.generate_data import GenerateData with open(args.config_path) as cfg: config = yaml.load(cfg, Loader=yaml.FullLoader) gd", "= GenerateData(config) train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir'])", "range(embs.shape[0]): val = 0 for k in range(512): val += embs[i][k] * embs[j][k]", "+= embs[i][k] * embs[j][k] print(i, j, val) if __name__ == '__main__': # logger.info(\"hello,", "embs[i][k] * embs[j][k] print(i, j, val) if __name__ == '__main__': # logger.info(\"hello, insightface/recognition\")", "# print(layer) for img, _ in train_data.take(1): embs = get_embeddings(model, img) for i", "<reponame>w-garcia/insightface from __future__ import absolute_import, division, print_function, unicode_literals import 
argparse import os import", "as tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel tf.enable_eager_execution()", "from __future__ import absolute_import, division, print_function, unicode_literals import argparse import os import sys", "main(): args = parse_args(sys.argv[1:]) # logger.info(args) from recognition.data.generate_data import GenerateData with open(args.config_path) as", "tf.enable_eager_execution() def get_embeddings(model, images): prelogits, _, _ = model(images, training=False) embeddings = tf.nn.l2_normalize(prelogits,", "config = yaml.load(cfg, Loader=yaml.FullLoader) gd = GenerateData(config) train_data, _ = gd.get_train_data() model =", "from {}\".format(tf.train.latest_checkpoint(ckpt_dir))) # for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in", "tensorflow as tf import yaml from recognition.backbones.resnet_v1 import ResNet_v1_50 from recognition.models.models import MyModel", "for i in range(embs.shape[0]): for j in range(embs.shape[0]): val = 0 for k", "img, _ in train_data.take(1): embs = get_embeddings(model, img) for i in range(embs.shape[0]): for", "= 0 for k in range(512): val += embs[i][k] * embs[j][k] print(i, j,", "for img, _ in train_data.take(1): embs = get_embeddings(model, img) for i in range(embs.shape[0]):", "_ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size']) ckpt_dir = os.path.expanduser(config['ckpt_dir']) ckpt = tf.train.Checkpoint(backbone=model.backbone)", "network') parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml') args = parser.parse_args(argv) return args", "argparse.ArgumentParser(description='Train face network') parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml') args = parser.parse_args(argv)", "yaml.load(cfg, Loader=yaml.FullLoader) gd = 
GenerateData(config) train_data, _ = gd.get_train_data() model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size'])", "in range(embs.shape[0]): val = 0 for k in range(512): val += embs[i][k] *", "tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)): # print(layer) for img, _ in train_data.take(1): embs = get_embeddings(model, img) for" ]
[ "1 b = \"foo\" c = (d, e) di = {f: 1, g:", "= 1 b = \"foo\" c = (d, e) di = {f: 1,", "a = 1 b = \"foo\" c = (d, e) di = {f:", "b = \"foo\" c = (d, e) di = {f: 1, g: 2}" ]
[ ":type nums: List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int) for x in nums:", "Solution: def containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype: bool \"\"\" numCount =", "return true if any value appears at least twice in the array, and", "# Output: true # Example 2: # # Input: [1,2,3,4] # Output: false", "# # Input: [1,2,3,4] # Output: false # Example 3: # # Input:", "class Solution: def containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype: bool \"\"\" numCount", "in the array, and it should return false if every element is distinct.", "at least twice in the array, and it should return false if every", "the array contains any duplicates. # # Your function should return true if", "every element is distinct. # # Example 1: # # Input: [1,2,3,1] #", "Given an array of integers, find if the array contains any duplicates. #", "# Input: [1,2,3,4] # Output: false # Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2]", "3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def containsDuplicate(self, nums):", "the array, and it should return false if every element is distinct. #", "duplicates. # # Your function should return true if any value appears at", "2: # # Input: [1,2,3,4] # Output: false # Example 3: # #", "and it should return false if every element is distinct. # # Example", "function should return true if any value appears at least twice in the", "false # Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution:", "1: # # Input: [1,2,3,1] # Output: true # Example 2: # #", "should return false if every element is distinct. 
# # Example 1: #", "Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def containsDuplicate(self,", "true # Example 2: # # Input: [1,2,3,4] # Output: false # Example", "= collections.defaultdict(int) for x in nums: if numCount[x] == 1: return True numCount[x]", "\"\"\" numCount = collections.defaultdict(int) for x in nums: if numCount[x] == 1: return", "distinct. # # Example 1: # # Input: [1,2,3,1] # Output: true #", "Example 2: # # Input: [1,2,3,4] # Output: false # Example 3: #", "Input: [1,2,3,1] # Output: true # Example 2: # # Input: [1,2,3,4] #", "an array of integers, find if the array contains any duplicates. # #", "Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def containsDuplicate(self, nums): \"\"\" :type nums:", "false if every element is distinct. # # Example 1: # # Input:", "array of integers, find if the array contains any duplicates. # # Your", "if every element is distinct. # # Example 1: # # Input: [1,2,3,1]", "if any value appears at least twice in the array, and it should", "of integers, find if the array contains any duplicates. # # Your function", "find if the array contains any duplicates. 
# # Your function should return", "Example 1: # # Input: [1,2,3,1] # Output: true # Example 2: #", "List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int) for x in nums: if numCount[x]", "any value appears at least twice in the array, and it should return", "# Given an array of integers, find if the array contains any duplicates.", "collections.defaultdict(int) for x in nums: if numCount[x] == 1: return True numCount[x] +=", "# Your function should return true if any value appears at least twice", "appears at least twice in the array, and it should return false if", "for x in nums: if numCount[x] == 1: return True numCount[x] += 1", "# # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def containsDuplicate(self, nums): \"\"\"", "# Input: [1,2,3,1] # Output: true # Example 2: # # Input: [1,2,3,4]", "in nums: if numCount[x] == 1: return True numCount[x] += 1 return False", "def containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int)", "should return true if any value appears at least twice in the array,", "Output: true # Example 2: # # Input: [1,2,3,4] # Output: false #", "least twice in the array, and it should return false if every element", "it should return false if every element is distinct. # # Example 1:", "element is distinct. # # Example 1: # # Input: [1,2,3,1] # Output:", "is distinct. # # Example 1: # # Input: [1,2,3,1] # Output: true", "[1,2,3,4] # Output: false # Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output:", "# # Example 1: # # Input: [1,2,3,1] # Output: true # Example", "# Example 1: # # Input: [1,2,3,1] # Output: true # Example 2:", "Output: true class Solution: def containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype: bool", "Hash Table # Given an array of integers, find if the array contains", "if the array contains any duplicates. 
# # Your function should return true", "# Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def containsDuplicate(self, nums): \"\"\" :type", "x in nums: if numCount[x] == 1: return True numCount[x] += 1 return", "Table # Given an array of integers, find if the array contains any", "# # Your function should return true if any value appears at least", "# Array; Hash Table # Given an array of integers, find if the", "Output: false # Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class", "Input: [1,2,3,4] # Output: false # Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] #", "containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int) for", "nums): \"\"\" :type nums: List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int) for x", "return false if every element is distinct. # # Example 1: # #", "array contains any duplicates. # # Your function should return true if any", "# Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def", "Array; Hash Table # Given an array of integers, find if the array", "true class Solution: def containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype: bool \"\"\"", "# # Input: [1,2,3,1] # Output: true # Example 2: # # Input:", ":rtype: bool \"\"\" numCount = collections.defaultdict(int) for x in nums: if numCount[x] ==", "# Output: true class Solution: def containsDuplicate(self, nums): \"\"\" :type nums: List[int] :rtype:", "true if any value appears at least twice in the array, and it", "[1,1,1,3,3,4,3,2,4,2] # Output: true class Solution: def containsDuplicate(self, nums): \"\"\" :type nums: List[int]", "bool \"\"\" numCount = collections.defaultdict(int) for x in nums: if numCount[x] == 1:", "twice in the array, and it should return false if every element is", "integers, find if the array contains any duplicates. 
# # Your function should", "Your function should return true if any value appears at least twice in", "\"\"\" :type nums: List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int) for x in", "nums: List[int] :rtype: bool \"\"\" numCount = collections.defaultdict(int) for x in nums: if", "any duplicates. # # Your function should return true if any value appears", "[1,2,3,1] # Output: true # Example 2: # # Input: [1,2,3,4] # Output:", "# Example 2: # # Input: [1,2,3,4] # Output: false # Example 3:", "contains any duplicates. # # Your function should return true if any value", "array, and it should return false if every element is distinct. # #", "value appears at least twice in the array, and it should return false", "# Output: false # Example 3: # # Input: [1,1,1,3,3,4,3,2,4,2] # Output: true", "numCount = collections.defaultdict(int) for x in nums: if numCount[x] == 1: return True" ]
[ "start: str, end: str, bank: List[str]) -> int: bfs = [start] genes =", "0 while bfs: arr = [] for g in bfs: if g ==", "cnt for i, c in enumerate(g): for new in 'AGTC': if new !=", "set(bank) cnt = 0 while bfs: arr = [] for g in bfs:", "new != c: s = g[:i] + new + g[i + 1:] if", "1:] if s in genes: arr.append(s) genes.discard(s) bfs = arr cnt += 1", "g[i + 1:] if s in genes: arr.append(s) genes.discard(s) bfs = arr cnt", "in 'AGTC': if new != c: s = g[:i] + new + g[i", "[] for g in bfs: if g == end: return cnt for i,", "end: str, bank: List[str]) -> int: bfs = [start] genes = set(bank) cnt", "int: bfs = [start] genes = set(bank) cnt = 0 while bfs: arr", "= 0 while bfs: arr = [] for g in bfs: if g", "if g == end: return cnt for i, c in enumerate(g): for new", "minMutation(self, start: str, end: str, bank: List[str]) -> int: bfs = [start] genes", "bfs: if g == end: return cnt for i, c in enumerate(g): for", "genes = set(bank) cnt = 0 while bfs: arr = [] for g", "def minMutation(self, start: str, end: str, bank: List[str]) -> int: bfs = [start]", "for i, c in enumerate(g): for new in 'AGTC': if new != c:", "for g in bfs: if g == end: return cnt for i, c", "bank: List[str]) -> int: bfs = [start] genes = set(bank) cnt = 0", "g[:i] + new + g[i + 1:] if s in genes: arr.append(s) genes.discard(s)", "enumerate(g): for new in 'AGTC': if new != c: s = g[:i] +", "return cnt for i, c in enumerate(g): for new in 'AGTC': if new", "+ 1:] if s in genes: arr.append(s) genes.discard(s) bfs = arr cnt +=", "[start] genes = set(bank) cnt = 0 while bfs: arr = [] for", "bfs = [start] genes = set(bank) cnt = 0 while bfs: arr =", "List[str]) -> int: bfs = [start] genes = set(bank) cnt = 0 while", "c in enumerate(g): for new in 'AGTC': if new != c: s =", "s in genes: arr.append(s) genes.discard(s) bfs = arr cnt += 1 return -1", "new in 'AGTC': if new != c: s = g[:i] + new +", "+ g[i + 1:] if s in genes: arr.append(s) genes.discard(s) bfs = arr", "Solution: def minMutation(self, start: str, 
end: str, bank: List[str]) -> int: bfs =", "arr = [] for g in bfs: if g == end: return cnt", "new + g[i + 1:] if s in genes: arr.append(s) genes.discard(s) bfs =", "in enumerate(g): for new in 'AGTC': if new != c: s = g[:i]", "while bfs: arr = [] for g in bfs: if g == end:", "g in bfs: if g == end: return cnt for i, c in", "s = g[:i] + new + g[i + 1:] if s in genes:", "+ new + g[i + 1:] if s in genes: arr.append(s) genes.discard(s) bfs", "if new != c: s = g[:i] + new + g[i + 1:]", "== end: return cnt for i, c in enumerate(g): for new in 'AGTC':", "= [] for g in bfs: if g == end: return cnt for", "in bfs: if g == end: return cnt for i, c in enumerate(g):", "-> int: bfs = [start] genes = set(bank) cnt = 0 while bfs:", "= g[:i] + new + g[i + 1:] if s in genes: arr.append(s)", "str, bank: List[str]) -> int: bfs = [start] genes = set(bank) cnt =", "'AGTC': if new != c: s = g[:i] + new + g[i +", "class Solution: def minMutation(self, start: str, end: str, bank: List[str]) -> int: bfs", "g == end: return cnt for i, c in enumerate(g): for new in", "c: s = g[:i] + new + g[i + 1:] if s in", "if s in genes: arr.append(s) genes.discard(s) bfs = arr cnt += 1 return", "str, end: str, bank: List[str]) -> int: bfs = [start] genes = set(bank)", "end: return cnt for i, c in enumerate(g): for new in 'AGTC': if", "for new in 'AGTC': if new != c: s = g[:i] + new", "!= c: s = g[:i] + new + g[i + 1:] if s", "= set(bank) cnt = 0 while bfs: arr = [] for g in", "i, c in enumerate(g): for new in 'AGTC': if new != c: s", "cnt = 0 while bfs: arr = [] for g in bfs: if", "= [start] genes = set(bank) cnt = 0 while bfs: arr = []", "bfs: arr = [] for g in bfs: if g == end: return" ]
[ "op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end Alembic commands ### def downgrade(): ###", "op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): ### commands", "identifiers, used by Alembic. from sqlalchemy import Integer, String revision = '4c087f9202a' down_revision", "generated by Alembic - please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey')", "from sqlalchemy import Integer, String revision = '4c087f9202a' down_revision = '<PASSWORD>' from alembic", "'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups', 'user_ids') ### end", "revision = '4c087f9202a' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as", "### def downgrade(): ### commands auto generated by Alembic - please adjust! ###", "'news', 'users', ['user_id'], ['id']) ### end Alembic commands ### def downgrade(): ### commands", "auto generated by Alembic - please adjust! 
### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[]))", "existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ###", "['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end Alembic commands ### def downgrade():", "sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id'])", "sa from sqlalchemy.dialects import postgresql def upgrade(): ### commands auto generated by Alembic", "4c087f9202a Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used by", "generated by Alembic - please adjust! ### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups',", "### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic", "'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end Alembic commands ###", "['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end Alembic commands ### def", "### commands auto generated by Alembic - please adjust! ### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String),", "# revision identifiers, used by Alembic. from sqlalchemy import Integer, String revision =", "Integer, String revision = '4c087f9202a' down_revision = '<PASSWORD>' from alembic import op import", "by Alembic - please adjust! 
### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups',", "import postgresql def upgrade(): ### commands auto generated by Alembic - please adjust!", "Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used by Alembic. from sqlalchemy", "postgresql def upgrade(): ### commands auto generated by Alembic - please adjust! ###", "nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news',", "postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None,", "2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used by Alembic. from sqlalchemy import Integer,", "= '<PASSWORD>' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import", "op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups', 'user_ids') ###", "Alembic - please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id',", "auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups',", "please adjust! ### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None,", "adjust! 
### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups',", "\"\"\"empty message Revision ID: 4c087f9202a Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" #", "- please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(),", "import Integer, String revision = '4c087f9202a' down_revision = '<PASSWORD>' from alembic import op", "used by Alembic. from sqlalchemy import Integer, String revision = '4c087f9202a' down_revision =", "commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None,", "<filename>migrations/versions/4c087f9202a_.py \"\"\"empty message Revision ID: 4c087f9202a Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\"", "<PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used by Alembic. from", "### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups', 'user_ids')", "from sqlalchemy.dialects import postgresql def upgrade(): ### commands auto generated by Alembic -", "adjust! ### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups',", "commands ### def downgrade(): ### commands auto generated by Alembic - please adjust!", "- please adjust! 
### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True)", "message Revision ID: 4c087f9202a Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision", "\"\"\" # revision identifiers, used by Alembic. from sqlalchemy import Integer, String revision", "downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'news',", "alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade():", "Revision ID: 4c087f9202a Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers,", "['id']) ### end Alembic commands ### def downgrade(): ### commands auto generated by", "sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): ### commands auto generated", "ID: 4c087f9202a Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used", "07:30:03.072213 \"\"\" # revision identifiers, used by Alembic. from sqlalchemy import Integer, String", "'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end Alembic commands", "default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users',", "'users', ['user_id'], ['id']) ### end Alembic commands ### def downgrade(): ### commands auto", "from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def", "as sa from sqlalchemy.dialects import postgresql def upgrade(): ### commands auto generated by", "Alembic. from sqlalchemy import Integer, String revision = '4c087f9202a' down_revision = '<PASSWORD>' from", "commands auto generated by Alembic - please adjust! 
### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True,", "please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False)", "def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('groups',", "nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end", "by Alembic - please adjust! ### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id',", "### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users',", "['user_id'], ['id']) ### end Alembic commands ### def downgrade(): ### commands auto generated", "String revision = '4c087f9202a' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy", "'<PASSWORD>' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql", "Revises: <PASSWORD> Create Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used by Alembic.", "op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'],", "= '4c087f9202a' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa", "down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects", "import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): ### commands auto", "end Alembic commands ### def downgrade(): ### commands auto 
generated by Alembic -", "op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'],", "'4c087f9202a' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa from", "op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups', 'user_ids') ### end Alembic commands", "'user_id', existing_type=sa.INTEGER(), nullable=True) op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id'])", "revision identifiers, used by Alembic. from sqlalchemy import Integer, String revision = '4c087f9202a'", "sqlalchemy.dialects import postgresql def upgrade(): ### commands auto generated by Alembic - please", "Alembic - please adjust! ### op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[])) op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(),", "Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please", "### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'news', type_='foreignkey')", "def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None,", "sqlalchemy import Integer, String revision = '4c087f9202a' down_revision = '<PASSWORD>' from alembic import", "by Alembic. from sqlalchemy import Integer, String revision = '4c087f9202a' down_revision = '<PASSWORD>'", "import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): ###", "upgrade(): ### commands auto generated by Alembic - please adjust! 
### op.add_column('groups', sa.Column('user_ids',", "op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id']) op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id']) ### end Alembic", "'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups', 'user_ids') ### end Alembic commands ###", "Date: 2018-08-02 07:30:03.072213 \"\"\" # revision identifiers, used by Alembic. from sqlalchemy import", "type_='foreignkey') op.drop_constraint(None, 'groups', type_='foreignkey') op.alter_column('groups', 'user_id', existing_type=sa.INTEGER(), nullable=False) op.drop_column('groups', 'user_ids') ### end Alembic" ]
[ "<gh_stars>0 from flask_cors import CORS from flex.conf import config cors = CORS(origins=config.CORS_ORIGINS, supports_credentials=True)" ]
[ "Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification", "is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please", "{ \"content\": \"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\",", "is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is {phone_verification_code_2fa}\", }, }", "\"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY:", "noqa # fmt: off from .choices import ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT:", "{ # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\":", "code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is {phone_verification_code_2fa}\", },", "ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", },", "}, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\",", "\"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", }, # Sms", "to reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code", "}, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA:", "}, 
ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is {phone_verification_code_2fa}\", }, } # fmt:", "flake8: noqa # fmt: off from .choices import ContentType CONTENT_PRESETS= { # Emails", "reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is", "import ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", },", "\"content\": \"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code", "\"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to", "{ \"content\": \"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification", "ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is {phone_verification_code_2fa}\", }, } # fmt: on", ".choices import ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\",", "ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset", "\"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset", "your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT:", "}, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\":", "\"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is 
{email_verification_code}.\", },", "# Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your", "fmt: off from .choices import ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\":", "# fmt: off from .choices import ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: {", "\"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is {email_verification_code}.\",", "\"content\": \"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", },", "{ \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is", "}, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a", "href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your", "email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: {", "{email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click", "CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: {", "ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the", "# flake8: noqa # fmt: off from .choices import ContentType CONTENT_PRESETS= { #", "Password.\", }, 
ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your", "password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is {phone_verification_code}\", },", "ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your", "Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\":", "{ \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", }, #", "link</a> to reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification", "code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\":", "ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY: { \"content\": \"Your verification code", "\"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is", "{phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is {phone_verification_code_2fa}\", }, } #", "from .choices import ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your", "\"Please click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION:", "ContentType CONTENT_PRESETS= { # Emails ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify your email.\", }, ContentType.EMAIL_VERIFICATION_BODY:", "off from .choices import ContentType CONTENT_PRESETS= { # Emails 
ContentType.EMAIL_VERIFICATION_SUBJECT: { \"content\": \"Verify", "verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: { \"content\": \"Your verification code is {phone_verification_code_2fa}\",", "# Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is {phone_verification_code}\", }, ContentType.PHONE_VERIFICATION_2FA: {", "{ \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: { \"content\": \"Please click <a href=\\\"{password_verification_link}\\\">the link</a>", "click <a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION: {", "<a href=\\\"{password_verification_link}\\\">the link</a> to reset your password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\":", "your password.\", }, # Sms ContentType.PHONE_VERIFICATION: { \"content\": \"Your verification code is {phone_verification_code}\",", "verification code is {email_verification_code}.\", }, ContentType.EMAIL_RESET_PASSWORD_SUBJECT: { \"content\": \"Reset Password.\", }, ContentType.EMAIL_RESET_PASSWORD_BODY: {" ]
[ "dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults = [", "interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means =", "zip(bars, stds, means): plt.text( bar.get_x() + 0.5 * bar.get_width(), mean + error +", "= \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image space: for dataset in", "3 NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image space: for", "in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE,", "\"metricImageSpace.png\")) plt.close() # Distance in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors =", "] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x: x.mean, metricResults))) stds", "# Distance in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for", "NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image space: for dataset", "\"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means,", "datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = 
list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults", "datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x: x.mean, metricResults)))", "[ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda", "for interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for", "np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults)))", "+ \"_\" + \"metricImageSpace.png\")) plt.close() # Distance in latent space: for dataset in", "enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x: x.mean +", "fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight =", "= np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4,", "labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE)", "stds = np.array(list(map(lambda x: 
x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES]", "* bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 )", "from evaluation.results import packageResults from dissertation import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10", "in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"]", "= [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\"", "capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0)", "rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda", "extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture", ") plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() #", "space: for dataset 
in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults", "x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal for bar, error,", "image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors:", "x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x: x.mean, metricResults))) stds =", "mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset", "Distance in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor", "plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\" + interpolationFactor + \"_\" +", "x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if", "if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars", "for architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ]", "interpolationTechnique in 
datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels,", "matplotlib.pyplot as plt from evaluation.results import packageResults from dissertation import datasetInfo from config.routes", "np import matplotlib.pyplot as plt from evaluation.results import packageResults from dissertation import datasetInfo", "max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal for bar,", "np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] +", "yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in", "capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent", "datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults))) stds =", "plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\",", "packageResults from dissertation import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14", "NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor +", "packageResults.interpolationResults.getDictionary() # Distance in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = 
list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys())", "bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\",", "in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES:", "rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\"))", "list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"]", "$\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda", "dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture in", "bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout()", "in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in", "+ error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", 
rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset +", "import matplotlib.pyplot as plt from evaluation.results import packageResults from dissertation import datasetInfo from", "interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique", "= plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE)", "= max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal for", "+ error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset +", "+ \"metricImageSpace.png\")) plt.close() # Distance in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors", "x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"]", "plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 *", "rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE)", "plt from evaluation.results import packageResults from dissertation import datasetInfo from config.routes import getRecordedResultsRoute", "plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) 
plt.ylabel(\"ED(enc(x_centre),", "labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique ==", "error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\"", "dissertation import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 * 4", "plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 *", "error, mean in zip(bars, stds, means): plt.text( bar.get_x() + 0.5 * bar.get_width(), mean", "extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor", "plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults))", "datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90)", "ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\"", "\\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique in", "0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0", "+ 0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), 
ha=\"center\", va=\"bottom\", rotation=0,", "extraHeight = 0.0125 * maxVal for bar, error, mean in zip(bars, stds, means):", "interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x:", "for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x,", "bar.get_x() + 0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\",", "space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for", "plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal", "plt.close() # Distance in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys())", "latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors:", "for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean,", "x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] 
plt.figure(figsize=(4, 6)) bars =", "import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 * 4 / 3 NUMBER_FORMAT = \"{:.0f}\"", "means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels", "metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x,", "\"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER:", "else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds,", "interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() # Distance in latent space: for dataset", "+ \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique", "for bar, error, mean in zip(bars, stds, means): plt.text( bar.get_x() + 0.5 *", "interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults =", "= np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels =", "in zip(bars, stds, means): plt.text( bar.get_x() + 0.5 * bar.get_width(), mean + error", "rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\" + interpolationFactor +", "= [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds,", "* 4 / 3 
NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in", "= np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda", "from dissertation import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 *", "= np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique]", "\"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() # Distance in latent space:", "NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture +", "metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ]", "maxVal for bar, error, mean in zip(bars, stds, means): plt.text( bar.get_x() + 0.5", "\"_\" + \"metricImageSpace.png\")) plt.close() # Distance in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER:", "in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in", "1) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults)))", "Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = 
max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight", "as np import matplotlib.pyplot as plt from evaluation.results import packageResults from dissertation import", "interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors", "datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ]", "interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x:", "means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\",", "for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x,", "metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in", "fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x:", "mean in zip(bars, stds, means): plt.text( bar.get_x() + 0.5 * bar.get_width(), mean +", "for interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ 
interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for", "for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults =", "np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"]", "list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"]", "plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125", "interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars =", "labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal", "plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125", "bar, error, mean in zip(bars, stds, means): plt.text( bar.get_x() + 0.5 * bar.get_width(),", "import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE 
= 14 * 4 /", "* bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE )", "yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE)", "+ \"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() # Distance in latent", "metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means", "fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight =", "x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for", "interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES", "6)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation", "fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0)", "x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] 
plt.figure(figsize=(4, 6)) bars", "= np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation,", "+ \"_\" + architecture + \"_\" + interpolationFactor + \"_\" + \"metricLatentSpace.png\")) plt.close()", "[datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds, capsize=5)", "from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 * 4 / 3 NUMBER_FORMAT", "MINI_FONTSIZE=10 FONTSIZE = 14 * 4 / 3 NUMBER_FORMAT = \"{:.0f}\" interpolationResults =", "= 0.0125 * maxVal for bar, error, mean in zip(bars, stds, means): plt.text(", "in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in", "# Distance in latent space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for", "import numpy as np import matplotlib.pyplot as plt from evaluation.results import packageResults from", "datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90)", "stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\", "plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = 
max(map(lambda x: x.mean", "datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES))", "mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset", "np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6))", "datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 * 4 / 3", "fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\" + interpolationFactor", "plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}},", "fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal =", "va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\" +", "] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda", "for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = 
list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture", "+ interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() # Distance in latent space: for", "plt.text( bar.get_x() + 0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\",", "= [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means =", "+ extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" +", "in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults))) stds", "plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed", "/ 3 NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image space:", "x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal for bar, error, mean", "in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x =", "+ 0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE,", "Distance in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = 
list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor", "in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE,", "ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\"", "0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE", "+ 1) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation,", "[ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means", "Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x)) in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal =", "plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() # Distance", "4 / 3 NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance in image", "stds, means): plt.text( bar.get_x() + 0.5 * bar.get_width(), mean + error + extraHeight,", "+ extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" +", "plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed", "plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) 
plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x:", "config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 * 4 / 3 NUMBER_FORMAT =", "in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in", "[datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES]", "= list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [", "* maxVal for bar, error, mean in zip(bars, stds, means): plt.text( bar.get_x() +", "means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"ED(enc(x_centre), enc(x))", "in Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation,", "= list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: for architecture in datasetInfo.ARCH_TYPES: metricResults = [", "metricResults)) extraHeight = 0.0125 * maxVal for bar, error, mean in zip(bars, stds,", "x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] +", "14 * 4 / 3 NUMBER_FORMAT = 
\"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() # Distance", "np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique]", "= 14 * 4 / 3 NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary() #", "Latent Space\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults))", "+ [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1)", "[datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else", "(\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8))", "plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close() # Distance in", "+ (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4,", "\"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\") for", "in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x: x.mean,", "+ 
\"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique == \"interpolateLatentSpace\" else \"\")", "evaluation.results import packageResults from dissertation import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE", "interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique", "\"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x, means, yerr=stds, capsize=5)", "= [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x", "8)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation", "means): plt.text( bar.get_x() + 0.5 * bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean),", "interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means, yerr=stds, capsize=5) plt.xticks(x, labels,", "== \"interpolateLatentSpace\" else \"\") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 8)) bars = plt.bar(x,", "0.0125 * maxVal for bar, error, mean in zip(bars, stds, means): plt.text( bar.get_x()", "for interpolationTechnique in 
datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda", "fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation,", "import packageResults from dissertation import datasetInfo from config.routes import getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE =", "metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] +", "Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean", "metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\" if interpolationTechnique", "interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES", "np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x:", "plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\" + interpolationFactor + \"_\" + \"metricLatentSpace.png\"))", "getRecordedResultsRoute MINI_FONTSIZE=10 FONTSIZE = 14 * 4 / 3 NUMBER_FORMAT = \"{:.0f}\" 
interpolationResults", "x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal for bar, error, mean in zip(bars,", "x.mean, metricResults))) stds = np.array(list(map(lambda x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique", "FONTSIZE = 14 * 4 / 3 NUMBER_FORMAT = \"{:.0f}\" interpolationResults = packageResults.interpolationResults.getDictionary()", "interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]].keys()) for interpolationFactor in interpolationFactors: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] +", "[ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x =", "as plt from evaluation.results import packageResults from dissertation import datasetInfo from config.routes import", "interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"conv\"]][interpolationFactor][\"interpolateLatentSpace\"][\"metricImageSpace\"] ] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)", "maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal", "error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout() 
plt.savefig(getRecordedResultsRoute(dataset + \"_\"", "labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES] plt.figure(figsize=(4, 6)) bars = plt.bar(x, means,", "] + [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][\"dense\"]][interpolationFactor][interpolationTechnique][\"metricImageSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) +", "$\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean +", "bar.get_width(), mean + error + extraHeight, NUMBER_FORMAT.format(mean), ha=\"center\", va=\"bottom\", rotation=0, fontsize=MINI_FONTSIZE ) plt.tight_layout()", "= packageResults.interpolationResults.getDictionary() # Distance in image space: for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER: interpolationFactors =", "va=\"bottom\", fontsize=MINI_FONTSIZE, rotation=0 ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\" +", "interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults)))", ") plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + architecture + \"_\" + interpolationFactor + \"_\"", "\\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE) maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults)) extraHeight", "+ x.standardDeviation, metricResults)) extraHeight = 0.0125 * maxVal for bar, error, mean in", "x: x.standardDeviation, metricResults))) labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[\"interpolateLatentSpace\"] + \"_{\\mathrm{conv}}$\"] + \\ 
[datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + (\"_{\\mathrm{dense}}$\"", "fontsize=MINI_FONTSIZE ) plt.tight_layout() plt.savefig(getRecordedResultsRoute(dataset + \"_\" + interpolationFactor + \"_\" + \"metricImageSpace.png\")) plt.close()", "architecture in datasetInfo.ARCH_TYPES: metricResults = [ interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique][\"metricLatentSpace\"] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES ] x", "x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES)) means = np.array(list(map(lambda x: x.mean, metricResults))) stds = np.array(list(map(lambda x:", "plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90) plt.xlabel(\"Proposed Interpolation $\\mathbf{x}$\", fontsize=FONTSIZE) plt.ylabel(\"$\\mathcal{BCE}[\\mathbf{x}_{\\mathrm{centre}}, \\mathbf{x}]$\", fontsize=FONTSIZE) plt.ylim(ymin=0) plt.yticks(fontsize=MINI_FONTSIZE)", "numpy as np import matplotlib.pyplot as plt from evaluation.results import packageResults from dissertation" ]
[ "возникающее при указании полигона, которого нет в АСУ. def __init__(self): text = 'Не", "__init__(self): text = 'Не найдена запись в таблице asu_poligons, соответствующая ' \\ 'этому", "Исключение, возникающее при указании полигона, которого нет в АСУ. def __init__(self): text =", "найдена запись в таблице asu_poligons, соответствующая ' \\ 'этому полигону. Зарегестрируйте ее сначала.'", "Исключения \"\"\" class NoAsuPolygon(Exception): # Исключение, возникающее при указании полигона, которого нет в", "АСУ. def __init__(self): text = 'Не найдена запись в таблице asu_poligons, соответствующая '", "полигона, которого нет в АСУ. def __init__(self): text = 'Не найдена запись в", "которого нет в АСУ. def __init__(self): text = 'Не найдена запись в таблице", "запись в таблице asu_poligons, соответствующая ' \\ 'этому полигону. Зарегестрируйте ее сначала.' super().__init__(text)", "'Не найдена запись в таблице asu_poligons, соответствующая ' \\ 'этому полигону. Зарегестрируйте ее", "text = 'Не найдена запись в таблице asu_poligons, соответствующая ' \\ 'этому полигону.", "class NoAsuPolygon(Exception): # Исключение, возникающее при указании полигона, которого нет в АСУ. def", "# Исключение, возникающее при указании полигона, которого нет в АСУ. def __init__(self): text", "def __init__(self): text = 'Не найдена запись в таблице asu_poligons, соответствующая ' \\", "указании полигона, которого нет в АСУ. def __init__(self): text = 'Не найдена запись", "в АСУ. def __init__(self): text = 'Не найдена запись в таблице asu_poligons, соответствующая", "= 'Не найдена запись в таблице asu_poligons, соответствующая ' \\ 'этому полигону. Зарегестрируйте", "\"\"\" class NoAsuPolygon(Exception): # Исключение, возникающее при указании полигона, которого нет в АСУ.", "нет в АСУ. def __init__(self): text = 'Не найдена запись в таблице asu_poligons,", "при указании полигона, которого нет в АСУ. 
def __init__(self): text = 'Не найдена", "NoAsuPolygon(Exception): # Исключение, возникающее при указании полигона, которого нет в АСУ. def __init__(self):", "\"\"\" Исключения \"\"\" class NoAsuPolygon(Exception): # Исключение, возникающее при указании полигона, которого нет" ]
[ "def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler,", "methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, )", "threading.Event() self._source_error_message = None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon =", "methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return 'error',", "import serve from flask import Flask, request import threading import http class StatusServer(object):", "if not self.source_error_event.is_set(): return None return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def", "<filename>images/spark/outbound-relay/status_server.py from waitress import serve from flask import Flask, request import threading import", "view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1,", "http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set(): return None return self._source_error_message @property def", "view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self):", "threading.Thread(target=self.run, args=()) self.thread.daemon = 
True self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler,", "request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self):", "self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set(): return None return", "= None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start()", "waitress import serve from flask import Flask, request import threading import http class", "StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message = None self.sink_done", "= Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app,", "threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start() def run(self): app =", "self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property", "'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self):", "source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set(): return None", "source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): 
self.source_done.set() return '',", "http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK", "view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return", "self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start() def run(self): app = Flask(__name__)", "return None return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() @property", "self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set():", "app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889,", "return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() @property def sink_received_all_data(self):", "return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() @property def sink_received_all_data(self): return self.sink_done.is_set() def mark_sink_received_all_data(self): self.sink_done.set()", "None return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() @property def", "threading import http class StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event()", 
"import http class StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message", "= threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/',", "source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() @property def sink_received_all_data(self): return self.sink_done.is_set() def mark_sink_received_all_data(self):", "def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set(): return", "'', http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set(): return None return self._source_error_message @property", "if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return", "request import threading import http class StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event", "return '', http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set(): return None return self._source_error_message", "port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set():", "not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done',", "app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve(", "self.thread.daemon = True 
self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done',", "Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\",", "app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def", "self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK", "self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK", "threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return", "if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not", "http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self): if not self.source_error_event.is_set():", "@property def source_error_message(self): if not self.source_error_event.is_set(): return None return self._source_error_message @property def source_sent_all_data(self):", "'', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self): if not", "run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) 
app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"])", "get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if", "from flask import Flask, request import threading import http class StatusServer(object): def __init__(self):", "self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode()", "from waitress import serve from flask import Flask, request import threading import http", "http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set()", "= threading.Event() self.source_error_event = threading.Event() self._source_error_message = None self.sink_done = threading.Event() self.thread =", ") def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving',", "= threading.Event() self._source_error_message = None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon", "serve from flask import Flask, request import threading import http class StatusServer(object): def", "import threading import http class StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event =", "self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message = None self.sink_done = threading.Event() self.thread", "def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() 
@property def sink_received_all_data(self): return self.sink_done.is_set() def", "def __init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message = None self.sink_done =", "= True self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler,", "Flask, request import threading import http class StatusServer(object): def __init__(self): self.source_done = threading.Event()", "app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set():", "self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"]) app.add_url_rule('/source_error',", "host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not", "source_error_message(self): if not self.source_error_event.is_set(): return None return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set()", "self._source_error_message = None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True", "__init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message = None self.sink_done = threading.Event()", "self.source_error_event = threading.Event() self._source_error_message = None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=())", "True self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"]) 
app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=[\"PUT\"])", "def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK", "= request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def", "if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message", "serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK", "http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK", "'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending',", "not self.source_error_event.is_set(): return None return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self):", "= threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start() def run(self): app", "http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set()", "self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start() def run(self):", "return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() 
return", "'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '',", "return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def", "threading.Event() self.source_error_event = threading.Event() self._source_error_message = None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run,", "not self.sink_done.is_set(): return 'sending', http.HTTPStatus.OK return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message =", "None self.sink_done = threading.Event() self.thread = threading.Thread(target=self.run, args=()) self.thread.daemon = True self.thread.start() def", "'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self):", "import Flask, request import threading import http class StatusServer(object): def __init__(self): self.source_done =", "self.source_error_event.is_set(): return None return self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait()", "flask import Flask, request import threading import http class StatusServer(object): def __init__(self): self.source_done", "methods=[\"PUT\"]) app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=[\"PUT\"]) serve( app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if", "class StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message = None", "@property def source_sent_all_data(self): return self.source_done.is_set() def 
wait_for_source_sent_all_data(self): self.source_done.wait() @property def sink_received_all_data(self): return self.sink_done.is_set()", "http class StatusServer(object): def __init__(self): self.source_done = threading.Event() self.source_error_event = threading.Event() self._source_error_message =", "return 'done', http.HTTPStatus.OK def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def", "args=()) self.thread.daemon = True self.thread.start() def run(self): app = Flask(__name__) app.add_url_rule('/', view_func=self.get_handler, methods=[\"GET\"])", "return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return '', http.HTTPStatus.OK @property def source_error_message(self): if", "def source_error_handler(self): self.source_error_event.set() self._source_error_message = request.data.decode() return '', http.HTTPStatus.OK def source_done_handler(self): self.source_done.set() return", "self._source_error_message @property def source_sent_all_data(self): return self.source_done.is_set() def wait_for_source_sent_all_data(self): self.source_done.wait() @property def sink_received_all_data(self): return", "def source_error_message(self): if not self.source_error_event.is_set(): return None return self._source_error_message @property def source_sent_all_data(self): return", "return 'error', http.HTTPStatus.OK if not self.source_done.is_set(): return 'receiving', http.HTTPStatus.OK if not self.sink_done.is_set(): return", "app, host=\"0.0.0.0\", port=8889, threads=1, ) def get_handler(self): if self.source_error_event.is_set(): return 'error', http.HTTPStatus.OK if" ]
[ "iterate through prices array, keep track of left pointer called currMin. if currProfit", "Complexity: O(n) # Time Complexity: O(n) class Solution: def maxProfit(self, prices: List[int]) ->", "(currProfit > maxProfit): maxProfit = currProfit if (prices[i] < currMin): currMin = prices[i]", "example of Sliding Window algorithm - keeps track of left and right pointers.", "array, keep track of left pointer called currMin. if currProfit > maxProfit, set", "== 1: return 0 currMin = prices[0] maxProfit = 0 for i in", "-> int: if len(prices) == 1: return 0 currMin = prices[0] maxProfit =", "i in range(1, len(prices)): currProfit = (prices[i] - currMin) if (currProfit > maxProfit):", "currProfit = (prices[i] - currMin) if (currProfit > maxProfit): maxProfit = currProfit if", "len(prices) == 1: return 0 currMin = prices[0] maxProfit = 0 for i", "keep going. at the end, return maxProfit # Space Complexity: O(n) # Time", "Complexity: O(n) class Solution: def maxProfit(self, prices: List[int]) -> int: if len(prices) ==", "> maxProfit, set maxProfit = currProfit. if the right pointer (prices[i]) < left", "end, return maxProfit # Space Complexity: O(n) # Time Complexity: O(n) class Solution:", "(prices[i]) < left pointer (currMin), set currMin to the right pointer and keep", "right pointer (prices[i]) < left pointer (currMin), set currMin to the right pointer", "prices array, keep track of left pointer called currMin. if currProfit > maxProfit,", "Solution: def maxProfit(self, prices: List[int]) -> int: if len(prices) == 1: return 0", "Window algorithm - keeps track of left and right pointers. iterate through prices", "- currMin) if (currProfit > maxProfit): maxProfit = currProfit if (prices[i] < currMin):", "0 for i in range(1, len(prices)): currProfit = (prices[i] - currMin) if (currProfit", "keep track of left pointer called currMin. if currProfit > maxProfit, set maxProfit", "pointer (currMin), set currMin to the right pointer and keep going. at the", "called currMin. 
if currProfit > maxProfit, set maxProfit = currProfit. if the right", "maxProfit): maxProfit = currProfit if (prices[i] < currMin): currMin = prices[i] return maxProfit", "and right pointers. iterate through prices array, keep track of left pointer called", "the right pointer and keep going. at the end, return maxProfit # Space", "prices: List[int]) -> int: if len(prices) == 1: return 0 currMin = prices[0]", "currProfit. if the right pointer (prices[i]) < left pointer (currMin), set currMin to", "left pointer (currMin), set currMin to the right pointer and keep going. at", "currMin) if (currProfit > maxProfit): maxProfit = currProfit if (prices[i] < currMin): currMin", "Space Complexity: O(n) # Time Complexity: O(n) class Solution: def maxProfit(self, prices: List[int])", "if currProfit > maxProfit, set maxProfit = currProfit. if the right pointer (prices[i])", "range(1, len(prices)): currProfit = (prices[i] - currMin) if (currProfit > maxProfit): maxProfit =", "= currProfit. if the right pointer (prices[i]) < left pointer (currMin), set currMin", "currMin. if currProfit > maxProfit, set maxProfit = currProfit. if the right pointer", "= prices[0] maxProfit = 0 for i in range(1, len(prices)): currProfit = (prices[i]", "right pointer and keep going. at the end, return maxProfit # Space Complexity:", "# Space Complexity: O(n) # Time Complexity: O(n) class Solution: def maxProfit(self, prices:", "O(n) # Time Complexity: O(n) class Solution: def maxProfit(self, prices: List[int]) -> int:", "set currMin to the right pointer and keep going. at the end, return", "the end, return maxProfit # Space Complexity: O(n) # Time Complexity: O(n) class", "pointer and keep going. at the end, return maxProfit # Space Complexity: O(n)", "def maxProfit(self, prices: List[int]) -> int: if len(prices) == 1: return 0 currMin", "of Sliding Window algorithm - keeps track of left and right pointers. iterate", "currProfit > maxProfit, set maxProfit = currProfit. 
if the right pointer (prices[i]) <", "of left and right pointers. iterate through prices array, keep track of left", "if len(prices) == 1: return 0 currMin = prices[0] maxProfit = 0 for", "set maxProfit = currProfit. if the right pointer (prices[i]) < left pointer (currMin),", "keeps track of left and right pointers. iterate through prices array, keep track", "of left pointer called currMin. if currProfit > maxProfit, set maxProfit = currProfit.", "left pointer called currMin. if currProfit > maxProfit, set maxProfit = currProfit. if", "prices[0] maxProfit = 0 for i in range(1, len(prices)): currProfit = (prices[i] -", "Time Complexity: O(n) class Solution: def maxProfit(self, prices: List[int]) -> int: if len(prices)", "pointer (prices[i]) < left pointer (currMin), set currMin to the right pointer and", "currMin to the right pointer and keep going. at the end, return maxProfit", "through prices array, keep track of left pointer called currMin. if currProfit >", "> maxProfit): maxProfit = currProfit if (prices[i] < currMin): currMin = prices[i] return", "- keeps track of left and right pointers. iterate through prices array, keep", "at the end, return maxProfit # Space Complexity: O(n) # Time Complexity: O(n)", "track of left pointer called currMin. if currProfit > maxProfit, set maxProfit =", "going. at the end, return maxProfit # Space Complexity: O(n) # Time Complexity:", "O(n) class Solution: def maxProfit(self, prices: List[int]) -> int: if len(prices) == 1:", "# Time Complexity: O(n) class Solution: def maxProfit(self, prices: List[int]) -> int: if", "right pointers. iterate through prices array, keep track of left pointer called currMin.", "for i in range(1, len(prices)): currProfit = (prices[i] - currMin) if (currProfit >", "maxProfit(self, prices: List[int]) -> int: if len(prices) == 1: return 0 currMin =", "track of left and right pointers. 
iterate through prices array, keep track of", "< left pointer (currMin), set currMin to the right pointer and keep going.", "algorithm - keeps track of left and right pointers. iterate through prices array,", "left and right pointers. iterate through prices array, keep track of left pointer", "0 currMin = prices[0] maxProfit = 0 for i in range(1, len(prices)): currProfit", "Sliding Window algorithm - keeps track of left and right pointers. iterate through", "return 0 currMin = prices[0] maxProfit = 0 for i in range(1, len(prices)):", "currMin = prices[0] maxProfit = 0 for i in range(1, len(prices)): currProfit =", "Notes: example of Sliding Window algorithm - keeps track of left and right", "if (currProfit > maxProfit): maxProfit = currProfit if (prices[i] < currMin): currMin =", "return maxProfit # Space Complexity: O(n) # Time Complexity: O(n) class Solution: def", "List[int]) -> int: if len(prices) == 1: return 0 currMin = prices[0] maxProfit", "class Solution: def maxProfit(self, prices: List[int]) -> int: if len(prices) == 1: return", "pointers. iterate through prices array, keep track of left pointer called currMin. if", "and keep going. at the end, return maxProfit # Space Complexity: O(n) #", "int: if len(prices) == 1: return 0 currMin = prices[0] maxProfit = 0", "= (prices[i] - currMin) if (currProfit > maxProfit): maxProfit = currProfit if (prices[i]", "= 0 for i in range(1, len(prices)): currProfit = (prices[i] - currMin) if", "1: return 0 currMin = prices[0] maxProfit = 0 for i in range(1,", "maxProfit, set maxProfit = currProfit. if the right pointer (prices[i]) < left pointer", "the right pointer (prices[i]) < left pointer (currMin), set currMin to the right", "(currMin), set currMin to the right pointer and keep going. 
at the end,", "in range(1, len(prices)): currProfit = (prices[i] - currMin) if (currProfit > maxProfit): maxProfit", "if the right pointer (prices[i]) < left pointer (currMin), set currMin to the", "maxProfit # Space Complexity: O(n) # Time Complexity: O(n) class Solution: def maxProfit(self,", "to the right pointer and keep going. at the end, return maxProfit #", "# Notes: example of Sliding Window algorithm - keeps track of left and", "maxProfit = currProfit. if the right pointer (prices[i]) < left pointer (currMin), set", "len(prices)): currProfit = (prices[i] - currMin) if (currProfit > maxProfit): maxProfit = currProfit", "<gh_stars>0 # Notes: example of Sliding Window algorithm - keeps track of left", "pointer called currMin. if currProfit > maxProfit, set maxProfit = currProfit. if the", "maxProfit = 0 for i in range(1, len(prices)): currProfit = (prices[i] - currMin)", "(prices[i] - currMin) if (currProfit > maxProfit): maxProfit = currProfit if (prices[i] <" ]
[ "norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type = norm_type self.transform_class = transform_class def", "var): dma, dmi = dist.max(), dist.min() diff = dma - dmi if diff", "if diff < 1e-15: diff = 1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var,", "dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes)", "var): avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1) if sd", "dist.min() diff = dma - dmi if diff < 1e-15: diff = 1", "if sd == 0: sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1", "ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse ) def normalize_by_span(self, dist, var):", "<reponame>rgschmitz1/BioDepot-workflow-builder from Orange.data import ContinuousVariable, Domain from Orange.statistics import distribution from Orange.util import", "self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if self.transform_class:", "Orange.util import Reprable from .preprocess import Normalize from .transformation import Normalizer as Norm", "Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg, sd = (dist.mean(), dist.standard_deviation())", "var elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: return", "return data.transform(domain) def normalize(self, dist, var): if not var.is_continuous: return var elif self.norm_type", "import Normalizer as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True,", "dmi = dist.max(), dist.min() diff = dma - dmi if diff < 1e-15:", "= data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) 
new_class_vars = [ self.normalize(dists[i + attr_len],", "dist, var): if not var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist,", "self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type = norm_type self.transform_class =", "= (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1) if sd == 0: sd", "== Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg, sd = (dist.mean(),", "= dist.max(), dist.min() diff = dma - dmi if diff < 1e-15: diff", "self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse ) else: return", "return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist,", "return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse ) def normalize_by_span(self, dist,", "for (i, var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if self.transform_class: attr_len =", "var) elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg,", "var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse ) def normalize_by_span(self, dist, var): dma,", "sparse=var.sparse ) def normalize_by_span(self, dist, var): dma, dmi = dist.max(), dist.min() diff =", "from Orange.data import ContinuousVariable, Domain from Orange.statistics import distribution from Orange.util import Reprable", "distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes) ] new_class_vars", "self.transform_class = transform_class def __call__(self, data): dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i],", 
"diff = 1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff),", "Normalize from .transformation import Normalizer as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def", "dist.size else (0, 1) if sd == 0: sd = 1 return ContinuousVariable(", "class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type", "return var elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan:", "[\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based", "dist.max(), dist.min() diff = dma - dmi if diff < 1e-15: diff =", "for (i, var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain)", "self.normalize(dists[i + attr_len], var) for (i, var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs,", "(i, var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def", "def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type = norm_type", "sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) / 2, 2", "attr_len], var) for (i, var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas)", "normalize_by_span(self, dist, var): dma, dmi = dist.max(), dist.min() diff = dma - dmi", "ContinuousVariable, Domain from Orange.statistics import distribution from Orange.util import Reprable from .preprocess import", "var): if not var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD: return 
self.normalize_by_sd(dist, var)", "1 return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse ) def normalize_by_span(self,", "avg, 1 / sd), sparse=var.sparse ) def normalize_by_span(self, dist, var): dma, dmi =", "normalize_by_sd(self, dist, var): avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1)", "zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type = norm_type self.transform_class = transform_class", "ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) / 2, 2 / diff), sparse=var.sparse, )", "self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size", "def normalize_by_span(self, dist, var): dma, dmi = dist.max(), dist.min() diff = dma -", "norm_type self.transform_class = transform_class def __call__(self, data): dists = distribution.get_distributions(data) new_attrs = [", "new_class_vars = data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i +", "var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars", "avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1) if sd ==", "= 1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse", "sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse )", "[ self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if", "new_class_vars = [ self.normalize(dists[i + attr_len], var) for (i, var) in enumerate(data.domain.class_vars) ]", "if dist.size else (0, 1) if sd == 0: sd = 1 return", "enumerate(data.domain.attributes) ] new_class_vars = 
data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars = [", "from Orange.util import Reprable from .preprocess import Normalize from .transformation import Normalizer as", "Reprable from .preprocess import Normalize from .transformation import Normalizer as Norm __all__ =", "= Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist, var): if not var.is_continuous:", "def normalize_by_sd(self, dist, var): avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0,", "= distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes) ]", "] new_class_vars = data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i", "+ attr_len], var) for (i, var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars,", "ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse ) else: return ContinuousVariable( var.name,", "from .transformation import Normalizer as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__(", "transform_class=False ): self.zero_based = zero_based self.norm_type = norm_type self.transform_class = transform_class def __call__(self,", "Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type =", "import ContinuousVariable, Domain from Orange.statistics import distribution from Orange.util import Reprable from .preprocess", "1 / diff), sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi)", "sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1) if sd == 0:", "else (0, 1) if sd == 0: sd = 1 return ContinuousVariable( var.name,", "= 1 return ContinuousVariable( var.name, 
compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse ) def", "Normalizer as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD,", "= [\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based =", "var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type ==", "elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist,", "__call__(self, data): dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for (i, var)", "attr_len = len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i + attr_len], var) for (i, var)", "1e-15: diff = 1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 /", "dmi, 1 / diff), sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma +", "import Reprable from .preprocess import Normalize from .transformation import Normalizer as Norm __all__", "[ self.normalize(dists[i + attr_len], var) for (i, var) in enumerate(data.domain.class_vars) ] domain =", "len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i + attr_len], var) for (i, var) in enumerate(data.domain.class_vars)", "import Normalize from .transformation import Normalizer as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable):", "dmi if diff < 1e-15: diff = 1 if self.zero_based: return ContinuousVariable( var.name,", "= [ self.normalize(dists[i + attr_len], var) for (i, var) in enumerate(data.domain.class_vars) ] domain", "zero_based self.norm_type = norm_type self.transform_class = transform_class def __call__(self, data): dists = distribution.get_distributions(data)", ") def 
normalize_by_span(self, dist, var): dma, dmi = dist.max(), dist.min() diff = dma", "compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse ) def normalize_by_span(self, dist, var): dma, dmi", ") else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) / 2, 2 /", "data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i + attr_len], var)", "/ diff), sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) /", "diff = dma - dmi if diff < 1e-15: diff = 1 if", "dist, var): dma, dmi = dist.max(), dist.min() diff = dma - dmi if", "1 / sd), sparse=var.sparse ) def normalize_by_span(self, dist, var): dma, dmi = dist.max(),", "new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist, var): if not var.is_continuous: return var", "enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist, var):", "dist.standard_deviation()) if dist.size else (0, 1) if sd == 0: sd = 1", "data.transform(domain) def normalize(self, dist, var): if not var.is_continuous: return var elif self.norm_type ==", "Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ):", "if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i + attr_len], var) for", "1) if sd == 0: sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var, avg,", "(i, var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes)", "/ sd), sparse=var.sparse ) def normalize_by_span(self, dist, var): dma, dmi = dist.max(), dist.min()", "dma - dmi if diff < 1e-15: diff = 1 if self.zero_based: return", "= zero_based self.norm_type = 
norm_type self.transform_class = transform_class def __call__(self, data): dists =", "if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse ) else:", "not var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type", "diff < 1e-15: diff = 1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi,", "transform_class def __call__(self, data): dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for", "= transform_class def __call__(self, data): dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var)", "self.norm_type = norm_type self.transform_class = transform_class def __call__(self, data): dists = distribution.get_distributions(data) new_attrs", "sd == 0: sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 /", "def normalize(self, dist, var): if not var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD:", "= len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i + attr_len], var) for (i, var) in", "import distribution from Orange.util import Reprable from .preprocess import Normalize from .transformation import", "new_attrs = [ self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes) ] new_class_vars =", "in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist,", "return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse ) else: return ContinuousVariable(", "dist, var): avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1) if", "data.domain.metas) return data.transform(domain) def normalize(self, dist, var): if not var.is_continuous: return var elif", "- dmi if diff < 1e-15: diff = 1 if 
self.zero_based: return ContinuousVariable(", "data): dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for (i, var) in", "self.zero_based = zero_based self.norm_type = norm_type self.transform_class = transform_class def __call__(self, data): dists", ".preprocess import Normalize from .transformation import Normalizer as Norm __all__ = [\"Normalizer\"] class", "Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self,", "== 0: sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 / sd),", "1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse )", "compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma", "var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var,", "var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self,", "domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist, var): if not", "= [ self.normalize(dists[i], var) for (i, var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars", "def __call__(self, data): dists = distribution.get_distributions(data) new_attrs = [ self.normalize(dists[i], var) for (i,", "Orange.statistics import distribution from Orange.util import Reprable from .preprocess import Normalize from .transformation", "distribution from Orange.util import Reprable from .preprocess import Normalize from .transformation import Normalizer", "sd), sparse=var.sparse ) def normalize_by_span(self, dist, var): dma, dmi = dist.max(), dist.min() diff", "from .preprocess 
import Normalize from .transformation import Normalizer as Norm __all__ = [\"Normalizer\"]", "if not var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif", "var) for (i, var) in enumerate(data.domain.class_vars) ] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return", "(0, 1) if sd == 0: sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var,", "self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg, sd =", "(dist.mean(), dist.standard_deviation()) if dist.size else (0, 1) if sd == 0: sd =", "self.transform_class: attr_len = len(data.domain.attributes) new_class_vars = [ self.normalize(dists[i + attr_len], var) for (i,", "self.norm_type == Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var)", "< 1e-15: diff = 1 if self.zero_based: return ContinuousVariable( var.name, compute_value=Norm(var, dmi, 1", "else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) / 2, 2 / diff),", "diff), sparse=var.sparse ) else: return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) / 2,", "return ContinuousVariable( var.name, compute_value=Norm(var, (dma + dmi) / 2, 2 / diff), sparse=var.sparse,", "var) def normalize_by_sd(self, dist, var): avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else", "0: sd = 1 return ContinuousVariable( var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse", ".transformation import Normalizer as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__( self,", "== Normalize.NormalizeBySD: return self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def", "self.normalize_by_sd(dist, var) elif self.norm_type == Normalize.NormalizeBySpan: 
return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var):", "elif self.norm_type == Normalize.NormalizeBySpan: return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg, sd", "Domain from Orange.statistics import distribution from Orange.util import Reprable from .preprocess import Normalize", "): self.zero_based = zero_based self.norm_type = norm_type self.transform_class = transform_class def __call__(self, data):", "= dma - dmi if diff < 1e-15: diff = 1 if self.zero_based:", "var) for (i, var) in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if self.transform_class: attr_len", "__init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based = zero_based self.norm_type = norm_type self.transform_class", "dma, dmi = dist.max(), dist.min() diff = dma - dmi if diff <", "return self.normalize_by_span(dist, var) def normalize_by_sd(self, dist, var): avg, sd = (dist.mean(), dist.standard_deviation()) if", "as Norm __all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False", "__all__ = [\"Normalizer\"] class Normalizer(Reprable): def __init__( self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False ): self.zero_based", "Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist, var): if not var.is_continuous: return", "normalize(self, dist, var): if not var.is_continuous: return var elif self.norm_type == Normalize.NormalizeBySD: return", "= norm_type self.transform_class = transform_class def __call__(self, data): dists = distribution.get_distributions(data) new_attrs =", "] domain = Domain(new_attrs, new_class_vars, data.domain.metas) return data.transform(domain) def normalize(self, dist, var): if", "from Orange.statistics import distribution from Orange.util import Reprable from 
.preprocess import Normalize from", "Orange.data import ContinuousVariable, Domain from Orange.statistics import distribution from Orange.util import Reprable from", "in enumerate(data.domain.attributes) ] new_class_vars = data.domain.class_vars if self.transform_class: attr_len = len(data.domain.attributes) new_class_vars =" ]
[ "Python\", \"Programming Language :: Python :: 2.7\", \"Programming Language :: Python :: 3.4\",", "\"Topic :: Software Development :: Libraries :: Python Modules\", ] install_requires = []", "['pytest-runner'] tests_require = ['pep8', 'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\",", "from setuptools import setup classifiers = [ \"License :: OSI Approved :: MIT", "version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers,", "Language :: Python :: 2.7\", \"Programming Language :: Python :: 3.4\", \"Programming Language", "+ install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse", "way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers, packages=[\"jsane\"], setup_requires=setup_requires, tests_require=tests_require, install_requires=install_requires, test_suite='jsane.tests', )", "[ \"License :: OSI Approved :: MIT License\", \"Programming Language :: Python\", \"Programming", ">= '2.7', (\"Requires Python v2.7 or above, get with the \" \"times, grandpa.\")", "the \" \"times, grandpa.\") from setuptools import setup classifiers = [ \"License ::", ":: 3.4\", \"Programming Language :: Python :: 3.5\", \"Programming Language :: Python ::", "Software Development :: Libraries :: Python Modules\", ] install_requires = [] setup_requires =", "License\", \"Programming Language :: Python\", \"Programming Language :: Python :: 2.7\", \"Programming Language", "'2.7', (\"Requires Python v2.7 or above, get with the \" \"times, grandpa.\") from", "= ['pytest-runner'] tests_require = ['pep8', 
'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\",", ":: Python :: 3.4\", \"Programming Language :: Python :: 3.5\", \"Programming Language ::", "__version__ assert sys.version >= '2.7', (\"Requires Python v2.7 or above, get with the", "= ['pep8', 'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner", "<filename>setup.py #!/usr/bin/env python import sys from jsane import __version__ assert sys.version >= '2.7',", "author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers, packages=[\"jsane\"],", "sys from jsane import __version__ assert sys.version >= '2.7', (\"Requires Python v2.7 or", "Language :: Python :: 3.6\", \"Topic :: Software Development :: Libraries :: Python", "MIT License\", \"Programming Language :: Python\", \"Programming Language :: Python :: 2.7\", \"Programming", "setuptools import setup classifiers = [ \"License :: OSI Approved :: MIT License\",", ":: Python :: 2.7\", \"Programming Language :: Python :: 3.4\", \"Programming Language ::", "'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to", "url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers, packages=[\"jsane\"], setup_requires=setup_requires, tests_require=tests_require,", ":: Python :: 3.6\", \"Topic :: Software Development :: Libraries :: Python Modules\",", "\"Programming Language :: Python :: 2.7\", \"Programming Language :: Python :: 3.4\", \"Programming", 
"setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\",", "3.4\", \"Programming Language :: Python :: 3.5\", \"Programming Language :: Python :: 3.6\",", ":: Libraries :: Python Modules\", ] install_requires = [] setup_requires = ['pytest-runner'] tests_require", "Approved :: MIT License\", \"Programming Language :: Python\", \"Programming Language :: Python ::", "above, get with the \" \"times, grandpa.\") from setuptools import setup classifiers =", ":: Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Topic :: Software", ":: Python\", \"Programming Language :: Python :: 2.7\", \"Programming Language :: Python ::", "Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Topic :: Software Development", "author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers, packages=[\"jsane\"], setup_requires=setup_requires,", "saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers, packages=[\"jsane\"], setup_requires=setup_requires, tests_require=tests_require, install_requires=install_requires, test_suite='jsane.tests',", "sys.version >= '2.7', (\"Requires Python v2.7 or above, get with the \" \"times,", "import __version__ assert sys.version >= '2.7', (\"Requires Python v2.7 or above, get with", "grandpa.\") from setuptools import setup classifiers = [ \"License :: OSI Approved ::", ":: MIT License\", \"Programming Language :: Python\", \"Programming Language :: Python :: 2.7\",", "Python :: 3.6\", \"Topic :: Software Development :: Libraries :: Python Modules\", ]", ":: 3.5\", \"Programming Language :: Python :: 3.6\", \"Topic :: Software Development ::", "\"Programming Language :: Python :: 3.4\", \"Programming Language :: Python :: 3.5\", 
\"Programming", "Development :: Libraries :: Python Modules\", ] install_requires = [] setup_requires = ['pytest-runner']", "Python v2.7 or above, get with the \" \"times, grandpa.\") from setuptools import", "[] setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest'] + install_requires setup( name=\"jsane\", version=__version__,", ":: OSI Approved :: MIT License\", \"Programming Language :: Python\", \"Programming Language ::", ":: Python Modules\", ] install_requires = [] setup_requires = ['pytest-runner'] tests_require = ['pep8',", "= [ \"License :: OSI Approved :: MIT License\", \"Programming Language :: Python\",", "['pep8', 'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way", "or above, get with the \" \"times, grandpa.\") from setuptools import setup classifiers", "import sys from jsane import __version__ assert sys.version >= '2.7', (\"Requires Python v2.7", "Modules\", ] install_requires = [] setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest'] +", "Python :: 3.4\", \"Programming Language :: Python :: 3.5\", \"Programming Language :: Python", "3.5\", \"Programming Language :: Python :: 3.6\", \"Topic :: Software Development :: Libraries", "install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\",", "v2.7 or above, get with the \" \"times, grandpa.\") from setuptools import setup", "] install_requires = [] setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest'] + install_requires", "\"License :: OSI Approved :: MIT License\", \"Programming Language :: Python\", \"Programming Language", "description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\", classifiers=classifiers, 
packages=[\"jsane\"], setup_requires=setup_requires, tests_require=tests_require, install_requires=install_requires,", "\"times, grandpa.\") from setuptools import setup classifiers = [ \"License :: OSI Approved", "(\"Requires Python v2.7 or above, get with the \" \"times, grandpa.\") from setuptools", "\"Programming Language :: Python :: 3.6\", \"Topic :: Software Development :: Libraries ::", "\"Programming Language :: Python\", \"Programming Language :: Python :: 2.7\", \"Programming Language ::", "#!/usr/bin/env python import sys from jsane import __version__ assert sys.version >= '2.7', (\"Requires", "with the \" \"times, grandpa.\") from setuptools import setup classifiers = [ \"License", "3.6\", \"Topic :: Software Development :: Libraries :: Python Modules\", ] install_requires =", "Python :: 2.7\", \"Programming Language :: Python :: 3.4\", \"Programming Language :: Python", ":: Software Development :: Libraries :: Python Modules\", ] install_requires = [] setup_requires", "tests_require = ['pep8', 'pytest'] + install_requires setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A", "Python Modules\", ] install_requires = [] setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest']", "setup classifiers = [ \"License :: OSI Approved :: MIT License\", \"Programming Language", "name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read(), license=\"MIT\",", "jsane import __version__ assert sys.version >= '2.7', (\"Requires Python v2.7 or above, get", "\"Programming Language :: Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Topic", ":: 2.7\", \"Programming Language :: Python :: 3.4\", \"Programming Language :: Python ::", "Language :: Python\", \"Programming Language :: Python :: 
2.7\", \"Programming Language :: Python", "from jsane import __version__ assert sys.version >= '2.7', (\"Requires Python v2.7 or above,", "Libraries :: Python Modules\", ] install_requires = [] setup_requires = ['pytest-runner'] tests_require =", "install_requires = [] setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest'] + install_requires setup(", "OSI Approved :: MIT License\", \"Programming Language :: Python\", \"Programming Language :: Python", ":: 3.6\", \"Topic :: Software Development :: Libraries :: Python Modules\", ] install_requires", "= [] setup_requires = ['pytest-runner'] tests_require = ['pep8', 'pytest'] + install_requires setup( name=\"jsane\",", "assert sys.version >= '2.7', (\"Requires Python v2.7 or above, get with the \"", "Language :: Python :: 3.4\", \"Programming Language :: Python :: 3.5\", \"Programming Language", "Language :: Python :: 3.5\", \"Programming Language :: Python :: 3.6\", \"Topic ::", "import setup classifiers = [ \"License :: OSI Approved :: MIT License\", \"Programming", "2.7\", \"Programming Language :: Python :: 3.4\", \"Programming Language :: Python :: 3.5\",", "\" \"times, grandpa.\") from setuptools import setup classifiers = [ \"License :: OSI", "classifiers = [ \"License :: OSI Approved :: MIT License\", \"Programming Language ::", "get with the \" \"times, grandpa.\") from setuptools import setup classifiers = [", "python import sys from jsane import __version__ assert sys.version >= '2.7', (\"Requires Python", "setup( name=\"jsane\", version=__version__, author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/skorokithakis/jsane/\", description=\"A saner way to parse JSON.\", long_description=open(\"README.rst\").read()," ]
[ "considered to be a real secret. There is no reasonable attack vector for", "= \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID", "<reponame>Lobbelt/specter-desktop \"\"\" Swan API uses PKCE OAuth2. Per Swan's API team: The client", "to be a real secret. There is no reasonable attack vector for this", "secret. There is no reasonable attack vector for this secret being public. \"\"\"", "= \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET = ( \"<KEY> ) SWAN_API_URL", "BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class", "secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\"", "this secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = (", "( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET =", "Swan API uses PKCE OAuth2. Per Swan's API team: The client secret here", "PKCE OAuth2. Per Swan's API team: The client secret here is not considered", "reasonable attack vector for this secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID =", "public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL", "SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig):", "\"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID =", "a real secret. 
There is no reasonable attack vector for this secret being", "= ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET", ") SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET = ( \"<KEY>", "Per Swan's API team: The client secret here is not considered to be", "not considered to be a real secret. There is no reasonable attack vector", "SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\"", "secret here is not considered to be a real secret. There is no", "is no reasonable attack vector for this secret being public. \"\"\" class BaseConfig:", "real secret. There is no reasonable attack vector for this secret being public.", "no reasonable attack vector for this secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID", "uses PKCE OAuth2. Per Swan's API team: The client secret here is not", "class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET = ( \"<KEY> ) SWAN_API_URL = \"https://api.swanbitcoin.com\"", "\"\"\" Swan API uses PKCE OAuth2. Per Swan's API team: The client secret", "\"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET = (", "class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL = \"https://dev-api.swanbitcoin.com\"", "Swan's API team: The client secret here is not considered to be a", "There is no reasonable attack vector for this secret being public. \"\"\" class", "is not considered to be a real secret. There is no reasonable attack", "team: The client secret here is not considered to be a real secret.", "be a real secret. 
There is no reasonable attack vector for this secret", "vector for this secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET", "API team: The client secret here is not considered to be a real", "OAuth2. Per Swan's API team: The client secret here is not considered to", "API uses PKCE OAuth2. Per Swan's API team: The client secret here is", "The client secret here is not considered to be a real secret. There", "here is not considered to be a real secret. There is no reasonable", "for this secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET =", "\"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET = ( \"<KEY> ) SWAN_API_URL =", "\"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" ) SWAN_API_URL =", "being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\" SWAN_CLIENT_SECRET = ( \"<KEY>\" )", "attack vector for this secret being public. \"\"\" class BaseConfig: SWAN_CLIENT_ID = \"specter-dev\"", "SWAN_API_URL = \"https://dev-api.swanbitcoin.com\" class ProductionConfig(BaseConfig): SWAN_CLIENT_ID = \"specter\" SWAN_CLIENT_SECRET = ( \"<KEY> )", "client secret here is not considered to be a real secret. There is" ]
[ "1 if retry_count == 3: raise Exception(\"Unable to connect to Database after 3", "в БД\"\"\" sql = f\"SELECT * FROM {table} WHERE {column} = %s\" response", "and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db() return", "closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db() return retval", "assert value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной УЗ из", "email) # @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8')", "def _query_fetch_one(self, query, values): \"\"\" Executes a db query, gets the first value,", "first value, and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() #", "# placeholders = ', '.join(['%s'] * len(fake_json)) # columns = ', '.join(fake_json.keys()) #", "\"\"\"Удаляем запись о созданной УЗ из БД и проверяем, что email не найден\"\"\"", "self.cursor.execute(query, values) # self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email,", "db query, gets the first value, and closes the connection. \"\"\" self.cursor.execute(query, values)", "%H:%M:%S\") # Дата для сохранения в БД sql = f\"UPDATE users SET confirmed_at='{confirmed_time}',", "seconds) between retries. 
count = 0 while count < retry_count: try: self.conn =", "\"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self, query,", "для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в БД", "retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую", "SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def", "в тестовую БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для", "сохранения в БД sql = f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s,", "the test_db and closes the connection afterwards. \"\"\" retval = self.cursor.execute(query, values) #", "value, and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db()", "'r', encoding='utf-8') as f: # value = json.loads(f.read()) # value.update({ # 'email': email,", "backoff = 1.2 # Time to wait (in seconds) between retries. count =", "count = count + 1 if retry_count == 3: raise Exception(\"Unable to connect", "email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM users WHERE email=%s\" assert not", "return except Exception: time.sleep(backoff) count = count + 1 if retry_count == 3:", "while count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True)", "database information from mysql_conf.py and creates a connection. \"\"\" import pymysql retry_count =", "= 1.2 # Time to wait (in seconds) between retries. 
count = 0", "# value.update({ # 'email': email, # 'encrypted_password': <PASSWORD> # }) # return value", "close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую БД для", "the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db() return retval def", "activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую БД для активации УЗ\"\"\" confirmed_time =", "', '.join(fake_json.keys()) # values = list(fake_json.values()) # sql = f\"INSERT INTO {table} (", "\"\"\" Gets database information from mysql_conf.py and creates a connection. \"\"\" import pymysql", "values = list(fake_json.values()) # sql = f\"INSERT INTO {table} ( {columns} ) VALUES", "datetime import time class DBManager: \"\"\"Установка соединения с БД и методы для выполнения", "запись о созданной УЗ из БД и проверяем, что email не найден\"\"\" sql_1", "# Time to wait (in seconds) between retries. count = 0 while count", "query, values): \"\"\" Executes a db query, gets all the values, and closes", "не найден\"\"\" sql_1 = \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 =", "fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as f: # value =", "from mysql_conf.py and creates a connection. 
\"\"\" import pymysql retry_count = 3 backoff", "sql = f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\"", "= f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql,", "datetime import datetime import time class DBManager: \"\"\"Установка соединения с БД и методы", "3: raise Exception(\"Unable to connect to Database after 3 retries.\") def _query_fetch_all(self, query,", "с БД и методы для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database", "'email': email, # 'encrypted_password': <PASSWORD> # }) # return value # # def", "_query_fetch_one(self, query, values): \"\"\" Executes a db query, gets the first value, and", "pass_hash): \"\"\"Вносим изменения в тестовую БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")", "= \"SELECT email FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod", "= self._query_fetch_one(sql, value) assert value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о", "email FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod # def", "email) sql_2 = \"SELECT email FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email)", "passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff) count =", "pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as f: # value = json.loads(f.read()) #", "db query, gets all the values, and closes the connection. 
\"\"\" self.cursor.execute(query, values)", "value # # def insert_new_row_into_db(self, fake_json, table): # placeholders = ', '.join(['%s'] *", "creates a connection. \"\"\" import pymysql retry_count = 3 backoff = 1.2 #", "# return value # # def insert_new_row_into_db(self, fake_json, table): # placeholders = ',", "response = self._query_fetch_one(sql, value) assert value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись", "_query_fetch_all(self, query, values): \"\"\" Executes a db query, gets all the values, and", "self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения", "delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной УЗ из БД и проверяем, что email", "из БД и проверяем, что email не найден\"\"\" sql_1 = \"DELETE FROM users", "should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой записи в БД\"\"\" sql = f\"SELECT", "3 retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes a db query, gets all", "datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в БД sql = f\"UPDATE users SET", "WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM users WHERE email=%s\" assert", "'encrypted_password': <PASSWORD> # }) # return value # # def insert_new_row_into_db(self, fake_json, table):", "def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой записи в БД\"\"\" sql =", "для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database information from mysql_conf.py and", "with open(path, 'r', encoding='utf-8') as f: # value = json.loads(f.read()) # value.update({ #", "\"\"\" Executes a db query, gets all the values, and closes the connection.", "value.update({ # 'email': email, # 'encrypted_password': <PASSWORD> # }) # return value #", "для сохранения в БД sql = 
f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\", "= self.cursor.execute(query, values) # self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self,", "self._query_fetch_one(sql, value) assert value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной", "email, # 'encrypted_password': <PASSWORD> # }) # return value # # def insert_new_row_into_db(self,", "', '.join(['%s'] * len(fake_json)) # columns = ', '.join(fake_json.keys()) # values = list(fake_json.values())", "query, values): \"\"\" Executes a query to the test_db and closes the connection", "# value = json.loads(f.read()) # value.update({ # 'email': email, # 'encrypted_password': <PASSWORD> #", "# @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as", "= list(fake_json.values()) # sql = f\"INSERT INTO {table} ( {columns} ) VALUES (", "value, column, table): \"\"\"Проверяем наличие новой записи в БД\"\"\" sql = f\"SELECT *", "соединения с БД и методы для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets", "f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем", "Gets database information from mysql_conf.py and creates a connection. \"\"\" import pymysql retry_count", "value = json.loads(f.read()) # value.update({ # 'email': email, # 'encrypted_password': <PASSWORD> # })", "\"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM users", "information from mysql_conf.py and creates a connection. 
\"\"\" import pymysql retry_count = 3", "return retval def _query_fetch_one(self, query, values): \"\"\" Executes a db query, gets the", "sql = f\"SELECT * FROM {table} WHERE {column} = %s\" response = self._query_fetch_one(sql,", "\"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self, query,", "изменения в тестовую БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата", "БД и методы для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database information", "email, pass_hash): \"\"\"Вносим изменения в тестовую БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d", "def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую БД", "= f\"INSERT INTO {table} ( {columns} ) VALUES ( {placeholders} );\" # self.execute_query(sql,", "sql_2 = \"SELECT email FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) #", "WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой", "self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r',", "to wait (in seconds) between retries. 
count = 0 while count < retry_count:", "retval def _execute_query(self, query, values): \"\"\" Executes a query to the test_db and", "# self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим", "+ 1 if retry_count == 3: raise Exception(\"Unable to connect to Database after", "= self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self, query, values): \"\"\" Executes a", "connect to Database after 3 retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes a", "наличие новой записи в БД\"\"\" sql = f\"SELECT * FROM {table} WHERE {column}", "(in seconds) between retries. count = 0 while count < retry_count: try: self.conn", "def __init__(self, DBModel): \"\"\" Gets database information from mysql_conf.py and creates a connection.", "# Дата для сохранения в БД sql = f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}',", "в БД sql = f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1", "о созданной УЗ из БД и проверяем, что email не найден\"\"\" sql_1 =", "= pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception:", "\"\"\" Executes a db query, gets the first value, and closes the connection.", "\"\"\" retval = self.cursor.execute(query, values) # self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close()", "self.conn.cursor() return except Exception: time.sleep(backoff) count = count + 1 if retry_count ==", "the first value, and closes the connection. 
\"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone()", "retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor()", "БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в", "(pass_hash, email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой записи в БД\"\"\"", "f: # value = json.loads(f.read()) # value.update({ # 'email': email, # 'encrypted_password': <PASSWORD>", "return retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в", "FROM {table} WHERE {column} = %s\" response = self._query_fetch_one(sql, value) assert value in", "try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return", "def _execute_query(self, query, values): \"\"\" Executes a query to the test_db and closes", "between retries. count = 0 while count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server,", "closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db() return retval", "# self.__close_db() return retval def _execute_query(self, query, values): \"\"\" Executes a query to", "class DBManager: \"\"\"Установка соединения с БД и методы для выполнения запросов\"\"\" def __init__(self,", "retries. 
count = 0 while count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user,", "БД и проверяем, что email не найден\"\"\" sql_1 = \"DELETE FROM users WHERE", "= json.loads(f.read()) # value.update({ # 'email': email, # 'encrypted_password': <PASSWORD> # }) #", "to connect to Database after 3 retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes", "Дата для сохранения в БД sql = f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \"", "\"\"\"Вносим изменения в тестовую БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") #", "self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self, query, values):", "list(fake_json.values()) # sql = f\"INSERT INTO {table} ( {columns} ) VALUES ( {placeholders}", "count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor", "users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email))", "Executes a db query, gets all the values, and closes the connection. 
\"\"\"", "БД\"\"\" sql = f\"SELECT * FROM {table} WHERE {column} = %s\" response =", "value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной УЗ из БД", "f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash,", "\"SELECT email FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod #", "json.loads(f.read()) # value.update({ # 'email': email, # 'encrypted_password': <PASSWORD> # }) # return", "и методы для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database information from", "test_db and closes the connection afterwards. \"\"\" retval = self.cursor.execute(query, values) # self.__close_db()", "import time class DBManager: \"\"\"Установка соединения с БД и методы для выполнения запросов\"\"\"", "WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'):", "query, values): \"\"\" Executes a db query, gets the first value, and closes", "'.join(['%s'] * len(fake_json)) # columns = ', '.join(fake_json.keys()) # values = list(fake_json.values()) #", "= \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM", "* len(fake_json)) # columns = ', '.join(fake_json.keys()) # values = list(fake_json.values()) # sql", "< retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor =", "запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database information from mysql_conf.py and creates a", "self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую БД для активации", "Exception(\"Unable to connect 
to Database after 3 retries.\") def _query_fetch_all(self, query, values): \"\"\"", "0 while count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port)", "import datetime import time class DBManager: \"\"\"Установка соединения с БД и методы для", "a db query, gets all the values, and closes the connection. \"\"\" self.cursor.execute(query,", "def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую БД для активации УЗ\"\"\" confirmed_time", "== 3: raise Exception(\"Unable to connect to Database after 3 retries.\") def _query_fetch_all(self,", "encoding='utf-8') as f: # value = json.loads(f.read()) # value.update({ # 'email': email, #", "placeholders = ', '.join(['%s'] * len(fake_json)) # columns = ', '.join(fake_json.keys()) # values", "= self.conn.cursor() return except Exception: time.sleep(backoff) count = count + 1 if retry_count", "len(fake_json)) # columns = ', '.join(fake_json.keys()) # values = list(fake_json.values()) # sql =", "open(path, 'r', encoding='utf-8') as f: # value = json.loads(f.read()) # value.update({ # 'email':", "DBManager: \"\"\"Установка соединения с БД и методы для выполнения запросов\"\"\" def __init__(self, DBModel):", "Executes a db query, gets the first value, and closes the connection. 
\"\"\"", "= count + 1 if retry_count == 3: raise Exception(\"Unable to connect to", "записи в БД\"\"\" sql = f\"SELECT * FROM {table} WHERE {column} = %s\"", "retry_count == 3: raise Exception(\"Unable to connect to Database after 3 retries.\") def", "email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой записи", "self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff) count = count + 1 if", "что email не найден\"\"\" sql_1 = \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email)", "values): \"\"\" Executes a db query, gets all the values, and closes the", "table): \"\"\"Проверяем наличие новой записи в БД\"\"\" sql = f\"SELECT * FROM {table}", "тестовую БД для активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения", "retval = self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self, query, values): \"\"\" Executes", "a db query, gets the first value, and closes the connection. 
\"\"\" self.cursor.execute(query,", "as f: # value = json.loads(f.read()) # value.update({ # 'email': email, # 'encrypted_password':", "sql_1 = \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email", "БД sql = f\"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE", "retval def _query_fetch_one(self, query, values): \"\"\" Executes a db query, gets the first", "columns = ', '.join(fake_json.keys()) # values = list(fake_json.values()) # sql = f\"INSERT INTO", "in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной УЗ из БД и", "УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в БД sql =", "* FROM {table} WHERE {column} = %s\" response = self._query_fetch_one(sql, value) assert value", "a connection. \"\"\" import pymysql retry_count = 3 backoff = 1.2 # Time", "self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self, query, values): \"\"\" Executes a db", "assert not self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with", "time.sleep(backoff) count = count + 1 if retry_count == 3: raise Exception(\"Unable to", "after 3 retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes a db query, gets", "\"\"\"Проверяем наличие новой записи в БД\"\"\" sql = f\"SELECT * FROM {table} WHERE", "email): \"\"\"Удаляем запись о созданной УЗ из БД и проверяем, что email не", "{column} = %s\" response = self._query_fetch_one(sql, value) assert value in response def delete_new_account_from_db(self,", "response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной УЗ из БД и проверяем,", "from datetime import datetime import time class DBManager: \"\"\"Установка соединения с БД и", "def delete_new_account_from_db(self, email): 
\"\"\"Удаляем запись о созданной УЗ из БД и проверяем, что", "self.__close_db() return retval def _execute_query(self, query, values): \"\"\" Executes a query to the", "insert_new_row_into_db(self, fake_json, table): # placeholders = ', '.join(['%s'] * len(fake_json)) # columns =", "gets all the values, and closes the connection. \"\"\" self.cursor.execute(query, values) retval =", "'.join(fake_json.keys()) # values = list(fake_json.values()) # sql = f\"INSERT INTO {table} ( {columns}", "column, table): \"\"\"Проверяем наличие новой записи в БД\"\"\" sql = f\"SELECT * FROM", "and creates a connection. \"\"\" import pymysql retry_count = 3 backoff = 1.2", "users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path', email='email',", "@staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as f:", "confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self,", "# def insert_new_row_into_db(self, fake_json, table): # placeholders = ', '.join(['%s'] * len(fake_json)) #", "Exception: time.sleep(backoff) count = count + 1 if retry_count == 3: raise Exception(\"Unable", "query, gets the first value, and closes the connection. \"\"\" self.cursor.execute(query, values) retval", "self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff) count = count + 1", "Database after 3 retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes a db query,", "def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as f: # value", "to the test_db and closes the connection afterwards. 
\"\"\" retval = self.cursor.execute(query, values)", "retval = self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self, query, values): \"\"\" Executes", "}) # return value # # def insert_new_row_into_db(self, fake_json, table): # placeholders =", "# 'email': email, # 'encrypted_password': <PASSWORD> # }) # return value # #", "port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff) count = count +", "FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path',", "values) retval = self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self, query, values): \"\"\"", "retry_count = 3 backoff = 1.2 # Time to wait (in seconds) between", "\" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column,", "новой записи в БД\"\"\" sql = f\"SELECT * FROM {table} WHERE {column} =", "созданной УЗ из БД и проверяем, что email не найден\"\"\" sql_1 = \"DELETE", "count = 0 while count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass,", "count + 1 if retry_count == 3: raise Exception(\"Unable to connect to Database", "values): \"\"\" Executes a query to the test_db and closes the connection afterwards.", "# sql = f\"INSERT INTO {table} ( {columns} ) VALUES ( {placeholders} );\"", "value) assert value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем запись о созданной УЗ", "connection. 
\"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self,", "table): # placeholders = ', '.join(['%s'] * len(fake_json)) # columns = ', '.join(fake_json.keys())", "методы для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database information from mysql_conf.py", "= ', '.join(['%s'] * len(fake_json)) # columns = ', '.join(fake_json.keys()) # values =", "Executes a query to the test_db and closes the connection afterwards. \"\"\" retval", "self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self, query, values): \"\"\" Executes a query", "the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db() return retval def", "user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff) count", "найден\"\"\" sql_1 = \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT", "confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в БД sql = f\"UPDATE", "query, gets all the values, and closes the connection. \"\"\" self.cursor.execute(query, values) retval", "WHERE {column} = %s\" response = self._query_fetch_one(sql, value) assert value in response def", "_execute_query(self, query, values): \"\"\" Executes a query to the test_db and closes the", "self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except", "query to the test_db and closes the connection afterwards. 
\"\"\" retval = self.cursor.execute(query,", "f\"SELECT * FROM {table} WHERE {column} = %s\" response = self._query_fetch_one(sql, value) assert", "retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes a db query, gets all the", "# 'encrypted_password': <PASSWORD> # }) # return value # # def insert_new_row_into_db(self, fake_json,", "the values, and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() #", "a query to the test_db and closes the connection afterwards. \"\"\" retval =", "<PASSWORD> # }) # return value # # def insert_new_row_into_db(self, fake_json, table): #", "return retval def _execute_query(self, query, values): \"\"\" Executes a query to the test_db", "# self.__close_db() return retval def _query_fetch_one(self, query, values): \"\"\" Executes a db query,", "mysql_conf.py and creates a connection. \"\"\" import pymysql retry_count = 3 backoff =", "self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM users WHERE email=%s\" assert not self._query_fetch_one(sql_2,", "closes the connection afterwards. \"\"\" retval = self.cursor.execute(query, values) # self.__close_db() return retval", "afterwards. \"\"\" retval = self.cursor.execute(query, values) # self.__close_db() return retval def close_db(self): self.cursor.close()", "not self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path,", "выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\" Gets database information from mysql_conf.py and creates", "def _query_fetch_all(self, query, values): \"\"\" Executes a db query, gets all the values,", "and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db() return", "wait (in seconds) between retries. 
count = 0 while count < retry_count: try:", "values): \"\"\" Executes a db query, gets the first value, and closes the", "= self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self, query, values): \"\"\" Executes a", "db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff) count = count", "self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой записи в", "# def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as f: #", "# values = list(fake_json.values()) # sql = f\"INSERT INTO {table} ( {columns} )", "\"\"\" Executes a query to the test_db and closes the connection afterwards. \"\"\"", "email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие новой записи в БД\"\"\" sql", "values) # self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close() def activate_new_account_db(self, email, pass_hash):", "sql = f\"INSERT INTO {table} ( {columns} ) VALUES ( {placeholders} );\" #", "# # def insert_new_row_into_db(self, fake_json, table): # placeholders = ', '.join(['%s'] * len(fake_json))", "connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self,", "return value # # def insert_new_row_into_db(self, fake_json, table): # placeholders = ', '.join(['%s']", "<filename>tests_ui/helpers/db_actions.py from datetime import datetime import time class DBManager: \"\"\"Установка соединения с БД", "\"\"\" import pymysql retry_count = 3 backoff = 1.2 # Time to wait", "3 backoff = 1.2 # Time to wait (in seconds) between retries. 
count", "проверяем, что email не найден\"\"\" sql_1 = \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1,", "активации УЗ\"\"\" confirmed_time = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в БД sql", "# with open(path, 'r', encoding='utf-8') as f: # value = json.loads(f.read()) # value.update({", "import pymysql retry_count = 3 backoff = 1.2 # Time to wait (in", "pymysql retry_count = 3 backoff = 1.2 # Time to wait (in seconds)", "# columns = ', '.join(fake_json.keys()) # values = list(fake_json.values()) # sql = f\"INSERT", "gets the first value, and closes the connection. \"\"\" self.cursor.execute(query, values) retval =", "= datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\") # Дата для сохранения в БД sql = f\"UPDATE users", "и проверяем, что email не найден\"\"\" sql_1 = \"DELETE FROM users WHERE email=%s\"", "DBModel): \"\"\" Gets database information from mysql_conf.py and creates a connection. \"\"\" import", "connection afterwards. \"\"\" retval = self.cursor.execute(query, values) # self.__close_db() return retval def close_db(self):", "the connection afterwards. \"\"\" retval = self.cursor.execute(query, values) # self.__close_db() return retval def", "time class DBManager: \"\"\"Установка соединения с БД и методы для выполнения запросов\"\"\" def", "values) retval = self.cursor.fetchone() # self.__close_db() return retval def _execute_query(self, query, values): \"\"\"", "connection. 
\"\"\" import pymysql retry_count = 3 backoff = 1.2 # Time to", "\\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column, table):", "email=%s\" assert not self._query_fetch_one(sql_2, email) # @staticmethod # def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'): #", "УЗ из БД и проверяем, что email не найден\"\"\" sql_1 = \"DELETE FROM", "# }) # return value # # def insert_new_row_into_db(self, fake_json, table): # placeholders", "email='email', pass_hash='<PASSWORD>'): # with open(path, 'r', encoding='utf-8') as f: # value = json.loads(f.read())", "and closes the connection afterwards. \"\"\" retval = self.cursor.execute(query, values) # self.__close_db() return", "= 0 while count < retry_count: try: self.conn = pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema,", "Time to wait (in seconds) between retries. count = 0 while count <", "def insert_new_row_into_db(self, fake_json, table): # placeholders = ', '.join(['%s'] * len(fake_json)) # columns", "f\"INSERT INTO {table} ( {columns} ) VALUES ( {placeholders} );\" # self.execute_query(sql, values)", "email не найден\"\"\" sql_1 = \"DELETE FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2", "FROM users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM users WHERE", "1.2 # Time to wait (in seconds) between retries. 
count = 0 while", "pymysql.connect(host=DBModel.db_server, user=DBModel.db_user, passwd=DBModel.db_pass, db=DBModel.db_schema, port=DBModel.db_port) self.conn.autocommit(True) self.cursor = self.conn.cursor() return except Exception: time.sleep(backoff)", "self.conn.close() def activate_new_account_db(self, email, pass_hash): \"\"\"Вносим изменения в тестовую БД для активации УЗ\"\"\"", "users WHERE email=%s\" self._execute_query(sql_1, email) sql_2 = \"SELECT email FROM users WHERE email=%s\"", "retval = self.cursor.execute(query, values) # self.__close_db() return retval def close_db(self): self.cursor.close() self.conn.close() def", "all the values, and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall()", "= f\"SELECT * FROM {table} WHERE {column} = %s\" response = self._query_fetch_one(sql, value)", "= %s\" response = self._query_fetch_one(sql, value) assert value in response def delete_new_account_from_db(self, email):", "sms_confirmed_at='{confirmed_time}', \" \\ f\"encrypted_password=%s, first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value,", "if retry_count == 3: raise Exception(\"Unable to connect to Database after 3 retries.\")", "= 3 backoff = 1.2 # Time to wait (in seconds) between retries.", "= ', '.join(fake_json.keys()) # values = list(fake_json.values()) # sql = f\"INSERT INTO {table}", "to Database after 3 retries.\") def _query_fetch_all(self, query, values): \"\"\" Executes a db", "raise Exception(\"Unable to connect to Database after 3 retries.\") def _query_fetch_all(self, query, values):", "\"\"\"Установка соединения с БД и методы для выполнения запросов\"\"\" def __init__(self, DBModel): \"\"\"", "first_password_changed=1 WHERE email=%s\" self._execute_query(sql, (pass_hash, email)) def should_be_new_record_into_db(self, value, column, table): \"\"\"Проверяем наличие", "self.__close_db() return retval def 
_query_fetch_one(self, query, values): \"\"\" Executes a db query, gets", "fake_json, table): # placeholders = ', '.join(['%s'] * len(fake_json)) # columns = ',", "__init__(self, DBModel): \"\"\" Gets database information from mysql_conf.py and creates a connection. \"\"\"", "values, and closes the connection. \"\"\" self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db()", "except Exception: time.sleep(backoff) count = count + 1 if retry_count == 3: raise", "self.cursor.execute(query, values) retval = self.cursor.fetchall() # self.__close_db() return retval def _query_fetch_one(self, query, values):", "%s\" response = self._query_fetch_one(sql, value) assert value in response def delete_new_account_from_db(self, email): \"\"\"Удаляем", "{table} WHERE {column} = %s\" response = self._query_fetch_one(sql, value) assert value in response" ]
[ "line = line.rstrip() words = line.split() for word in words: if word not", "= input(\"Enter file name: \") fh = open(fname) lst = list() for line", "= list() for line in fh: words = line.split() for word in words:", "input(\"Enter file name: \") fh = open(fname) lst = list() for line in", "lst = list() for line in fh: words = line.split() for word in", "\") fh = open(fname) lst = list() for line in fh: line =", "in fh: line = line.rstrip() words = line.split() for word in words: if", "name: \") fh = open(fname) lst = list() for line in fh: line", "if word in lst: continue else: lst.append(word) lst.sort() print(lst) #solution 2 fname =", "fh: line = line.rstrip() words = line.split() for word in words: if word", "= line.rstrip() words = line.split() for word in words: if word not in", "#solution 1 fname = input(\"Enter file name: \") fh = open(fname) lst =", "words: if word in lst: continue else: lst.append(word) lst.sort() print(lst) #solution 2 fname", "fh: words = line.split() for word in words: if word in lst: continue", "= line.split() for word in words: if word in lst: continue else: lst.append(word)", "= list() for line in fh: line = line.rstrip() words = line.split() for", "print(lst) #solution 2 fname = input(\"Enter file name: \") fh = open(fname) lst", "in fh: words = line.split() for word in words: if word in lst:", "1 fname = input(\"Enter file name: \") fh = open(fname) lst = list()", "#solution 2 fname = input(\"Enter file name: \") fh = open(fname) lst =", "\") fh = open(fname) lst = list() for line in fh: words =", "list() for line in fh: line = line.rstrip() words = line.split() for word", "line in fh: words = line.split() for word in words: if word in", "for word in words: if word in lst: continue else: lst.append(word) lst.sort() print(lst)", "open(fname) lst = list() for line in fh: line = line.rstrip() words =", "words = line.split() for word in words: if word not in lst: lst.append(word)", "in words: if word in lst: continue else: 
lst.append(word) lst.sort() print(lst) #solution 2", "line.split() for word in words: if word in lst: continue else: lst.append(word) lst.sort()", "for line in fh: words = line.split() for word in words: if word", "for line in fh: line = line.rstrip() words = line.split() for word in", "fname = input(\"Enter file name: \") fh = open(fname) lst = list() for", "word in words: if word in lst: continue else: lst.append(word) lst.sort() print(lst) #solution", "fh = open(fname) lst = list() for line in fh: words = line.split()", "line in fh: line = line.rstrip() words = line.split() for word in words:", "line.rstrip() words = line.split() for word in words: if word not in lst:", "else: lst.append(word) lst.sort() print(lst) #solution 2 fname = input(\"Enter file name: \") fh", "list() for line in fh: words = line.split() for word in words: if", "word in lst: continue else: lst.append(word) lst.sort() print(lst) #solution 2 fname = input(\"Enter", "lst: continue else: lst.append(word) lst.sort() print(lst) #solution 2 fname = input(\"Enter file name:", "line.split() for word in words: if word not in lst: lst.append(word) lst.sort() print(lst)", "file name: \") fh = open(fname) lst = list() for line in fh:", "in lst: continue else: lst.append(word) lst.sort() print(lst) #solution 2 fname = input(\"Enter file", "lst.append(word) lst.sort() print(lst) #solution 2 fname = input(\"Enter file name: \") fh =", "lst.sort() print(lst) #solution 2 fname = input(\"Enter file name: \") fh = open(fname)", "name: \") fh = open(fname) lst = list() for line in fh: words", "= line.split() for word in words: if word not in lst: lst.append(word) lst.sort()", "fh = open(fname) lst = list() for line in fh: line = line.rstrip()", "2 fname = input(\"Enter file name: \") fh = open(fname) lst = list()", "<reponame>YChanHuang/Python-for-every-body-notes<filename>ch8/ex8.4.py #solution 1 fname = input(\"Enter file name: \") fh = open(fname) lst", "continue else: lst.append(word) lst.sort() 
print(lst) #solution 2 fname = input(\"Enter file name: \")", "= open(fname) lst = list() for line in fh: words = line.split() for", "open(fname) lst = list() for line in fh: words = line.split() for word", "= open(fname) lst = list() for line in fh: line = line.rstrip() words", "lst = list() for line in fh: line = line.rstrip() words = line.split()", "words = line.split() for word in words: if word in lst: continue else:" ]
[ "{'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return self._start_time", "import json from dataclasses import dataclass from typing import Optional @dataclass() class Event:", "= value @property def description(self) -> Optional[str]: return self._description @description.setter def description(self, value:", "@property def description(self) -> Optional[str]: return self._description @description.setter def description(self, value: str): self._description", "object: obj = { \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\":", "def start_time(self, value: str): self._start_time = value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc':", "name: str _start_time: str = None _end_time = None _description = None #", "def description(self, value: str): self._description = value def to_json(self) -> object: obj =", "of constants.EVENT_SOURCE source_id: str # Unique ID from the source name: str _start_time:", "'2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return self._end_time @end_time.setter def end_time(self, value: str):", "str): self._start_time = value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def", "None _end_time = None _description = None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc':", "def description(self) -> Optional[str]: return self._description @description.setter def description(self, value: str): self._description =", "@description.setter def description(self, value: str): self._description = value def to_json(self) -> object: obj", "# One of constants.EVENT_SOURCE source_id: str # Unique ID from the source name:", "obj = { \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\": 
self.end_time,", "str # Unique ID from the source name: str _start_time: str = None", "value @property def description(self) -> Optional[str]: return self._description @description.setter def description(self, value: str):", "from the source name: str _start_time: str = None _end_time = None _description", "Optional[str]: return self._start_time @start_time.setter def start_time(self, value: str): self._start_time = value # {'timezone':", "'2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return self._start_time @start_time.setter def start_time(self, value: str):", "None _description = None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def", "'2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return self._start_time @start_time.setter def start_time(self,", "@start_time.setter def start_time(self, value: str): self._start_time = value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00',", "def end_time(self, value: str): self._end_time = value @property def description(self) -> Optional[str]: return", "Optional[str]: return self._end_time @end_time.setter def end_time(self, value: str): self._end_time = value @property def", "str): self._end_time = value @property def description(self) -> Optional[str]: return self._description @description.setter def", "description(self, value: str): self._description = value def to_json(self) -> object: obj = {", "Unique ID from the source name: str _start_time: str = None _end_time =", "constants.EVENT_SOURCE source_id: str # Unique ID from the source name: str _start_time: str", "json from dataclasses import dataclass from typing import Optional @dataclass() class Event: source:", "_start_time: str = None _end_time = None _description = None # {'timezone': 'America/Denver',", "_end_time = None _description = None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 
'utc': '2020-02-08T02:00:00Z'}", "end_time(self, value: str): self._end_time = value @property def description(self) -> Optional[str]: return self._description", "value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]:", "One of constants.EVENT_SOURCE source_id: str # Unique ID from the source name: str", "dataclass from typing import Optional @dataclass() class Event: source: str # One of", "= None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) ->", "self._description = value def to_json(self) -> object: obj = { \"source_id\": self.source_id, \"source\":", "# Unique ID from the source name: str _start_time: str = None _end_time", "self._start_time @start_time.setter def start_time(self, value: str): self._start_time = value # {'timezone': 'America/Denver', 'local':", "return self._end_time @end_time.setter def end_time(self, value: str): self._end_time = value @property def description(self)", "{ \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\": self.end_time, \"description\": self.description", "-> Optional[str]: return self._start_time @start_time.setter def start_time(self, value: str): self._start_time = value #", "value def to_json(self) -> object: obj = { \"source_id\": self.source_id, \"source\": self.source, \"name\":", "from dataclasses import dataclass from typing import Optional @dataclass() class Event: source: str", "return self._start_time @start_time.setter def start_time(self, value: str): self._start_time = value # {'timezone': 'America/Denver',", "@property def start_time(self) -> Optional[str]: return self._start_time @start_time.setter def start_time(self, value: str): self._start_time", "self._end_time = value @property def description(self) -> Optional[str]: return self._description 
@description.setter def description(self,", "= { \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\": self.end_time, \"description\":", "= None _end_time = None _description = None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00',", "_description = None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self)", "end_time(self) -> Optional[str]: return self._end_time @end_time.setter def end_time(self, value: str): self._end_time = value", "return self._description @description.setter def description(self, value: str): self._description = value def to_json(self) ->", "value: str): self._end_time = value @property def description(self) -> Optional[str]: return self._description @description.setter", "-> object: obj = { \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time,", "\"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\": self.end_time, \"description\": self.description } return json.dumps(obj)", "source: str # One of constants.EVENT_SOURCE source_id: str # Unique ID from the", "= None _description = None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property", "self._description @description.setter def description(self, value: str): self._description = value def to_json(self) -> object:", "ID from the source name: str _start_time: str = None _end_time = None", "Optional[str]: return self._description @description.setter def description(self, value: str): self._description = value def to_json(self)", "@property def end_time(self) -> Optional[str]: return self._end_time @end_time.setter def end_time(self, value: str): self._end_time", "str): self._description = value def to_json(self) -> object: obj = { \"source_id\": self.source_id,", "Event: source: str # One of 
constants.EVENT_SOURCE source_id: str # Unique ID from", "def end_time(self) -> Optional[str]: return self._end_time @end_time.setter def end_time(self, value: str): self._end_time =", "typing import Optional @dataclass() class Event: source: str # One of constants.EVENT_SOURCE source_id:", "source name: str _start_time: str = None _end_time = None _description = None", "start_time(self) -> Optional[str]: return self._start_time @start_time.setter def start_time(self, value: str): self._start_time = value", "value: str): self._start_time = value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property", "self._start_time = value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self)", "to_json(self) -> object: obj = { \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\":", "= value def to_json(self) -> object: obj = { \"source_id\": self.source_id, \"source\": self.source,", "start_time(self, value: str): self._start_time = value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'}", "# {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return", "@end_time.setter def end_time(self, value: str): self._end_time = value @property def description(self) -> Optional[str]:", "import Optional @dataclass() class Event: source: str # One of constants.EVENT_SOURCE source_id: str", "\"source_id\": self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\": self.end_time, \"description\": self.description }", "-> Optional[str]: return self._end_time @end_time.setter def end_time(self, value: str): self._end_time = value @property", "self._end_time @end_time.setter def end_time(self, value: str): self._end_time = value @property def 
description(self) ->", "import dataclass from typing import Optional @dataclass() class Event: source: str # One", "dataclasses import dataclass from typing import Optional @dataclass() class Event: source: str #", "str # One of constants.EVENT_SOURCE source_id: str # Unique ID from the source", "'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return self._end_time @end_time.setter def", "'2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return self._end_time @end_time.setter def end_time(self,", "Optional @dataclass() class Event: source: str # One of constants.EVENT_SOURCE source_id: str #", "class Event: source: str # One of constants.EVENT_SOURCE source_id: str # Unique ID", "value: str): self._description = value def to_json(self) -> object: obj = { \"source_id\":", "# {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return", "@dataclass() class Event: source: str # One of constants.EVENT_SOURCE source_id: str # Unique", "'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return self._start_time @start_time.setter", "None # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]:", "'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return self._start_time @start_time.setter def start_time(self, value:", "str = None _end_time = None _description = None # {'timezone': 'America/Denver', 'local':", "'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def start_time(self) -> Optional[str]: return self._start_time @start_time.setter def", "self.source_id, \"source\": self.source, \"name\": self.name, \"start_time\": self.start_time, \"end_time\": 
self.end_time, \"description\": self.description } return", "= value # {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) ->", "source_id: str # Unique ID from the source name: str _start_time: str =", "str _start_time: str = None _end_time = None _description = None # {'timezone':", "{'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return self._end_time", "'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return self._end_time @end_time.setter def end_time(self, value:", "description(self) -> Optional[str]: return self._description @description.setter def description(self, value: str): self._description = value", "def start_time(self) -> Optional[str]: return self._start_time @start_time.setter def start_time(self, value: str): self._start_time =", "'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'} @property def end_time(self) -> Optional[str]: return self._end_time @end_time.setter", "from typing import Optional @dataclass() class Event: source: str # One of constants.EVENT_SOURCE", "def to_json(self) -> object: obj = { \"source_id\": self.source_id, \"source\": self.source, \"name\": self.name,", "the source name: str _start_time: str = None _end_time = None _description =", "-> Optional[str]: return self._description @description.setter def description(self, value: str): self._description = value def" ]
[ "master_doc = 'index' # -- General configuration --------------------------------------------------- # Add any Sphinx extension", "contain templates here, relative to this directory. templates_path = ['_templates'] # List of", "'r') as stream: release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index' #", "['_templates'] # List of patterns, relative to source directory, that match files and", "directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that", "looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns", "templates here, relative to this directory. templates_path = ['_templates'] # List of patterns,", "author = '<NAME>' # The full version, including alpha/beta/rc tags with open('../VERSION', 'r')", "Add any paths that contain templates here, relative to this directory. templates_path =", "files and # directories to ignore when looking for source files. # This", "copyright = '2020, <NAME>' author = '<NAME>' # The full version, including alpha/beta/rc", "This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] html_static_path = ['_static']", "patterns, relative to source directory, that match files and # directories to ignore", "Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright = '2020,", "any paths that contain templates here, relative to this directory. templates_path = ['_templates']", "release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index' # -- General configuration", "be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones.", "# List of patterns, relative to source directory, that match files and #", "# -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here,", "relative to this directory. 
templates_path = ['_templates'] # List of patterns, relative to", "configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They", "List of patterns, relative to source directory, that match files and # directories", "(named 'sphinx.ext.*') or your custom # ones. extensions = [ ] # Add", "] # Add any paths that contain templates here, relative to this directory.", "names here, as strings. They can be # extensions coming with Sphinx (named", "open('../VERSION', 'r') as stream: release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index'", "source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = []", "that contain templates here, relative to this directory. templates_path = ['_templates'] # List", "# Configuration file for the Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud", "# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions", "The full version, including alpha/beta/rc tags with open('../VERSION', 'r') as stream: release =", "match files and # directories to ignore when looking for source files. #", "alpha/beta/rc tags with open('../VERSION', 'r') as stream: release = stream.read() html_theme = \"sphinx_rtd_theme\"", "any Sphinx extension module names here, as strings. They can be # extensions", "directories to ignore when looking for source files. # This pattern also affects", "for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns =", "= 'Cloud Optimized Fits' copyright = '2020, <NAME>' author = '<NAME>' # The", "https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright = '2020, <NAME>' author = '<NAME>'", "# Add any Sphinx extension module names here, as strings. They can be", "files. 
# This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] html_static_path", "Configuration file for the Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized", "directory, that match files and # directories to ignore when looking for source", "Sphinx extension module names here, as strings. They can be # extensions coming", "= '2020, <NAME>' author = '<NAME>' # The full version, including alpha/beta/rc tags", "your custom # ones. extensions = [ ] # Add any paths that", "can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom #", "html_theme = \"sphinx_rtd_theme\" master_doc = 'index' # -- General configuration --------------------------------------------------- # Add", "for the Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright", "= \"sphinx_rtd_theme\" master_doc = 'index' # -- General configuration --------------------------------------------------- # Add any", "paths that contain templates here, relative to this directory. templates_path = ['_templates'] #", "full version, including alpha/beta/rc tags with open('../VERSION', 'r') as stream: release = stream.read()", "# https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright = '2020, <NAME>' author =", "'2020, <NAME>' author = '<NAME>' # The full version, including alpha/beta/rc tags with", "of patterns, relative to source directory, that match files and # directories to", "as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or", "module names here, as strings. They can be # extensions coming with Sphinx", "to source directory, that match files and # directories to ignore when looking", "here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*')", "'<NAME>' # The full version, including alpha/beta/rc tags with open('../VERSION', 'r') as stream:", "or your custom # ones. extensions = [ ] # Add any paths", "to ignore when looking for source files. # This pattern also affects html_static_path", "Fits' copyright = '2020, <NAME>' author = '<NAME>' # The full version, including", "project = 'Cloud Optimized Fits' copyright = '2020, <NAME>' author = '<NAME>' #", "when looking for source files. # This pattern also affects html_static_path and html_extra_path.", "# Add any paths that contain templates here, relative to this directory. templates_path", "= '<NAME>' # The full version, including alpha/beta/rc tags with open('../VERSION', 'r') as", "# This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] html_static_path =", "<reponame>jbcurtin/cloud-fits # Configuration file for the Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project =", "version, including alpha/beta/rc tags with open('../VERSION', 'r') as stream: release = stream.read() html_theme", "as stream: release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index' # --", "stream: release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index' # -- General", "<NAME>' author = '<NAME>' # The full version, including alpha/beta/rc tags with open('../VERSION',", "relative to source directory, that match files and # directories to ignore when", "= stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index' # -- General configuration ---------------------------------------------------", "coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [", "file for the Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits'", "ones. 
extensions = [ ] # Add any paths that contain templates here,", "the Sphinx documentation builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright =", "with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ ]", "tags with open('../VERSION', 'r') as stream: release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc", "# The full version, including alpha/beta/rc tags with open('../VERSION', 'r') as stream: release", "'sphinx.ext.*') or your custom # ones. extensions = [ ] # Add any", "custom # ones. extensions = [ ] # Add any paths that contain", "builder. # https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright = '2020, <NAME>' author", "to this directory. templates_path = ['_templates'] # List of patterns, relative to source", "Optimized Fits' copyright = '2020, <NAME>' author = '<NAME>' # The full version,", "and # directories to ignore when looking for source files. # This pattern", "that match files and # directories to ignore when looking for source files.", "with open('../VERSION', 'r') as stream: release = stream.read() html_theme = \"sphinx_rtd_theme\" master_doc =", "--------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can", "templates_path = ['_templates'] # List of patterns, relative to source directory, that match", "'Cloud Optimized Fits' copyright = '2020, <NAME>' author = '<NAME>' # The full", "strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your", "including alpha/beta/rc tags with open('../VERSION', 'r') as stream: release = stream.read() html_theme =", "[ ] # Add any paths that contain templates here, relative to this", "= 'index' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module", "documentation builder. 
# https://www.sphinx-doc.org/en/master/usage/configuration.html project = 'Cloud Optimized Fits' copyright = '2020, <NAME>'", "# directories to ignore when looking for source files. # This pattern also", "General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings.", "-- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as", "this directory. templates_path = ['_templates'] # List of patterns, relative to source directory,", "extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions =", "They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom", "'index' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names", "extension module names here, as strings. They can be # extensions coming with", "ignore when looking for source files. # This pattern also affects html_static_path and", "# ones. extensions = [ ] # Add any paths that contain templates", "extensions = [ ] # Add any paths that contain templates here, relative", "= ['_templates'] # List of patterns, relative to source directory, that match files", "= [ ] # Add any paths that contain templates here, relative to", "here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative", "\"sphinx_rtd_theme\" master_doc = 'index' # -- General configuration --------------------------------------------------- # Add any Sphinx", "Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ ] #", "source directory, that match files and # directories to ignore when looking for", "stream.read() html_theme = \"sphinx_rtd_theme\" master_doc = 'index' # -- General configuration --------------------------------------------------- #", "Add any Sphinx extension module names here, as strings. They can be #" ]
[ "'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i", "'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end editing (%s).' %", "to try typing in the lower-right corner.') win = curses.newwin(nlines, ncols, uly, ulx)", "range(3): stdscr.move(uly + ncols + 2 + i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear()", "test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3 uly, ulx = 3, 2 if", "try typing in the lower-right corner.') win = curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr,", "insert_mode) contents = box.edit() stdscr.addstr(uly + ncols + 2, 0, 'Text entered in", "contents = box.edit() stdscr.addstr(uly + ncols + 2, 0, 'Text entered in the", "+ ncols + 2 + i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False)", "mode' else: mode = 'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to", "ncols + 2 + i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr,", "= textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly + ncols + 2, 0, 'Text", "2, 0, 'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch()", "insert_mode=False): ncols, nlines = 8, 3 uly, ulx = 3, 2 if insert_mode:", "1, uly + nlines, ulx + ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents", "curses import textpad def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3 uly, ulx", "- 3, ulx, 'Use Ctrl-G to end editing (%s).' 
% mode) stdscr.addstr(uly -", "'Be sure to try typing in the lower-right corner.') win = curses.newwin(nlines, ncols,", "ulx = 3, 2 if insert_mode: mode = 'insert mode' else: mode =", "'insert mode' else: mode = 'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G", "stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly + ncols + 2,", "ulx, 'Use Ctrl-G to end editing (%s).' % mode) stdscr.addstr(uly - 2, ulx,", "'Use Ctrl-G to end editing (%s).' % mode) stdscr.addstr(uly - 2, ulx, 'Be", "- 1, uly + nlines, ulx + ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode)", "- 2, ulx, 'Be sure to try typing in the lower-right corner.') win", "ulx, 'Be sure to try typing in the lower-right corner.') win = curses.newwin(nlines,", "mode) stdscr.addstr(uly - 2, ulx, 'Be sure to try typing in the lower-right", "textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly + ncols + 2, 0, 'Text entered", "any key') stdscr.getch() for i in range(3): stdscr.move(uly + ncols + 2 +", "2 + i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr, True) if", "end editing (%s).' 
% mode) stdscr.addstr(uly - 2, ulx, 'Be sure to try", "ulx) textpad.rectangle(stdscr, uly - 1, ulx - 1, uly + nlines, ulx +", "+ 2 + i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr, True)", "= curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx - 1, uly", "uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx - 1, uly + nlines, ulx", "curses from curses import textpad def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3", "= 8, 3 uly, ulx = 3, 2 if insert_mode: mode = 'insert", "for i in range(3): stdscr.move(uly + ncols + 2 + i, 0) stdscr.clrtoeol()", "= 'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end editing (%s).'", "stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr, True) if __name__ == '__main__': curses.wrapper(main)", "+ nlines, ulx + ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents = box.edit()", "in the lower-right corner.') win = curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly -", "= 'insert mode' else: mode = 'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use", "ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx - 1, uly + nlines,", "typing in the lower-right corner.') win = curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly", "stdscr.addstr(uly - 2, ulx, 'Be sure to try typing in the lower-right corner.')", "def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3 uly, ulx = 3, 2", "if insert_mode: mode = 'insert mode' else: mode = 'overwrite mode' stdscr.addstr(uly -", "2, ulx, 'Be sure to try typing in the lower-right corner.') win =", "0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr, True) if __name__ == '__main__':", "else: mode = 'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end", "+ ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) 
contents = box.edit() stdscr.addstr(uly + ncols", "mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end editing (%s).' % mode)", "+ 2, 0, 'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key')", "ncols + 2, 0, 'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any", "3 uly, ulx = 3, 2 if insert_mode: mode = 'insert mode' else:", "nlines = 8, 3 uly, ulx = 3, 2 if insert_mode: mode =", "ncols, nlines = 8, 3 uly, ulx = 3, 2 if insert_mode: mode", "1, ulx - 1, uly + nlines, ulx + ncols) stdscr.refresh() box =", "mode = 'overwrite mode' stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end editing", "uly - 1, ulx - 1, uly + nlines, ulx + ncols) stdscr.refresh()", "3, 2 if insert_mode: mode = 'insert mode' else: mode = 'overwrite mode'", "lower-right corner.') win = curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx", "textpad.rectangle(stdscr, uly - 1, ulx - 1, uly + nlines, ulx + ncols)", "from curses import textpad def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3 uly,", "= box.edit() stdscr.addstr(uly + ncols + 2, 0, 'Text entered in the box\\n')", "uly, ulx = 3, 2 if insert_mode: mode = 'insert mode' else: mode", "stdscr.move(uly + ncols + 2 + i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr,", "import curses from curses import textpad def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8,", "ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly + ncols +", "corner.') win = curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx -", "- 1, ulx - 1, uly + nlines, ulx + ncols) stdscr.refresh() box", "box.edit() stdscr.addstr(uly + ncols + 2, 0, 'Text entered in the box\\n') stdscr.addstr(repr(contents))", "insert_mode: mode = 'insert mode' else: mode = 'overwrite mode' stdscr.addstr(uly - 3,", "win = 
curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx - 1,", "box = textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly + ncols + 2, 0,", "i in range(3): stdscr.move(uly + ncols + 2 + i, 0) stdscr.clrtoeol() def", "key') stdscr.getch() for i in range(3): stdscr.move(uly + ncols + 2 + i,", "2 if insert_mode: mode = 'insert mode' else: mode = 'overwrite mode' stdscr.addstr(uly", "editing (%s).' % mode) stdscr.addstr(uly - 2, ulx, 'Be sure to try typing", "ulx + ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly +", "= 3, 2 if insert_mode: mode = 'insert mode' else: mode = 'overwrite", "stdscr.addstr('Press any key') stdscr.getch() for i in range(3): stdscr.move(uly + ncols + 2", "stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end editing (%s).' % mode) stdscr.addstr(uly", "entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i in", "ulx - 1, uly + nlines, ulx + ncols) stdscr.refresh() box = textpad.Textbox(win,", "box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i in range(3): stdscr.move(uly +", "i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr, True) if __name__ ==", "nlines, ulx + ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents = box.edit() stdscr.addstr(uly", "Ctrl-G to end editing (%s).' % mode) stdscr.addstr(uly - 2, ulx, 'Be sure", "8, 3 uly, ulx = 3, 2 if insert_mode: mode = 'insert mode'", "uly + nlines, ulx + ncols) stdscr.refresh() box = textpad.Textbox(win, insert_mode) contents =", "textpad def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3 uly, ulx = 3,", "3, ulx, 'Use Ctrl-G to end editing (%s).' 
% mode) stdscr.addstr(uly - 2,", "mode = 'insert mode' else: mode = 'overwrite mode' stdscr.addstr(uly - 3, ulx,", "% mode) stdscr.addstr(uly - 2, ulx, 'Be sure to try typing in the", "import textpad def test_textpad(stdscr, insert_mode=False): ncols, nlines = 8, 3 uly, ulx =", "sure to try typing in the lower-right corner.') win = curses.newwin(nlines, ncols, uly,", "stdscr.addstr(uly + ncols + 2, 0, 'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n')", "0, 'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for", "to end editing (%s).' % mode) stdscr.addstr(uly - 2, ulx, 'Be sure to", "(%s).' % mode) stdscr.addstr(uly - 2, ulx, 'Be sure to try typing in", "stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i in range(3): stdscr.move(uly + ncols +", "stdscr.getch() for i in range(3): stdscr.move(uly + ncols + 2 + i, 0)", "the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i in range(3): stdscr.move(uly", "curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1, ulx - 1, uly +", "the lower-right corner.') win = curses.newwin(nlines, ncols, uly, ulx) textpad.rectangle(stdscr, uly - 1,", "stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i in range(3): stdscr.move(uly + ncols", "in range(3): stdscr.move(uly + ncols + 2 + i, 0) stdscr.clrtoeol() def main(stdscr):", "+ i, 0) stdscr.clrtoeol() def main(stdscr): stdscr.clear() test_textpad(stdscr, False) test_textpad(stdscr, True) if __name__", "+ ncols + 2, 0, 'Text entered in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press", "in the box\\n') stdscr.addstr(repr(contents)) stdscr.addstr('\\n') stdscr.addstr('Press any key') stdscr.getch() for i in range(3):" ]
[ "return self.theta def main(): x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta", "x * x + b * x + c p1 = plt.plot(x_data, y_data,", "x + b * x + c p1 = plt.plot(x_data, y_data, '*r') p2", "solver.least_squares_solve() x = np.linspace(-12, 12., 100) a, b, c = theta print('fitted coefficient", "sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c + noise return sampled_x,", "plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data points vs Fitted", "c p1 = plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]),", "self.cost())) J = self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T @ J, -J.T", "self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def residual(self): pred_y = self.data_mat @", "* sampled_x + quadratic_b * sampled_x + quadratic_c + noise return sampled_x, sampled_y", "self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T @ J, -J.T @ r) self.theta", "10., num_data) sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x", "* x + b * x + c p1 = plt.plot(x_data, y_data, '*r')", "= np.linspace(-10, 10., num_data) sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b", "return r.T @ r def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i in", "plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted", "('sampled data', 'fitted function')) plt.title('Data points vs Fitted curve') plt.show() if __name__ ==", "+ noise return sampled_x, sampled_y class LeastSquares: def __init__(self, x, y): self.x =", "coefficient (a,b,c):', theta.transpose()) pred_y = a * x * x + b *", "cost(self): r = self.residual() return r.T @ r def compute_jacobian(self): return self.data_mat def", "self.y = y self.data_mat = np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T 
self.theta =", "= -2. quadratic_c = 1. num_data = 30 noise = 2 * np.random.randn(num_data)", "np.array([0, 0, 0.]) def residual(self): pred_y = self.data_mat @ self.theta r = pred_y", "pred_y = self.data_mat @ self.theta r = pred_y - self.y return r def", "= 2 * np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y = quadratic_a *", "pred_y - self.y return r def cost(self): r = self.residual() return r.T @", "@ J, -J.T @ r) self.theta += delta if np.linalg.norm(delta) < 1e-8: print('converged", "np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def residual(self): pred_y = self.data_mat @ self.theta", "print('converged iteration: {} cost: {}'.format( i, self.cost())) break return self.theta def main(): x_data,", "= np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def", "12., 100) a, b, c = theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y =", "quadratic_b * sampled_x + quadratic_c + noise return sampled_x, sampled_y class LeastSquares: def", "* sampled_x + quadratic_c + noise return sampled_x, sampled_y class LeastSquares: def __init__(self,", "* self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def residual(self): pred_y =", "compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i in range(10): print('iteration: {} cost: {}'.format(i,", "matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2. 
quadratic_c =", "generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x = np.linspace(-12, 12., 100)", "c = theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a * x *", "self.data_mat = np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.])", "i in range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r =", "theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a * x * x +", "np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y = quadratic_a * sampled_x * sampled_x", "num_data) sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x +", "= self.data_mat @ self.theta r = pred_y - self.y return r def cost(self):", "x self.y = y self.data_mat = np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta", "self.x = x self.y = y self.data_mat = np.vstack( [self.x * self.x, self.x,", "@ self.theta r = pred_y - self.y return r def cost(self): r =", "sampled_y class LeastSquares: def __init__(self, x, y): self.x = x self.y = y", "x, y): self.x = x self.y = y self.data_mat = np.vstack( [self.x *", "= pred_y - self.y return r def cost(self): r = self.residual() return r.T", "{}'.format( i, self.cost())) break return self.theta def main(): x_data, y_data = generate_quadratic_data() solver", "r = self.residual() return r.T @ r def compute_jacobian(self): return self.data_mat def least_squares_solve(self):", "print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a * x * x + b", "p2[0]), ('sampled data', 'fitted function')) plt.title('Data points vs Fitted curve') plt.show() if __name__", "b * x + c p1 = plt.plot(x_data, y_data, '*r') p2 = plt.plot(x,", "* sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c + noise return", "LeastSquares: def __init__(self, x, y): self.x = x self.y = y self.data_mat =", "return r def cost(self): r = 
self.residual() return r.T @ r def compute_jacobian(self):", "for i in range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r", "def residual(self): pred_y = self.data_mat @ self.theta r = pred_y - self.y return", "y_data) theta = solver.least_squares_solve() x = np.linspace(-12, 12., 100) a, b, c =", "'fitted function')) plt.title('Data points vs Fitted curve') plt.show() if __name__ == \"__main__\": main()", "p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data points", "= 30 noise = 2 * np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y", "residual(self): pred_y = self.data_mat @ self.theta r = pred_y - self.y return r", "= 2.4 quadratic_b = -2. quadratic_c = 1. num_data = 30 noise =", "plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data points vs Fitted curve') plt.show() if", "generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2. quadratic_c = 1. num_data = 30", "self.theta r = pred_y - self.y return r def cost(self): r = self.residual()", "solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x = np.linspace(-12, 12., 100) a,", "quadratic_b = -2. quadratic_c = 1. 
num_data = 30 noise = 2 *", "y_data, '*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function'))", "100) a, b, c = theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a", "= a * x * x + b * x + c p1", "= plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data points vs", "data', 'fitted function')) plt.title('Data points vs Fitted curve') plt.show() if __name__ == \"__main__\":", "{} cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T", "pred_y = a * x * x + b * x + c", "[self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def residual(self): pred_y", "+ quadratic_b * sampled_x + quadratic_c + noise return sampled_x, sampled_y class LeastSquares:", "quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c + noise", "b, c = theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a * x", "r) self.theta += delta if np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost: {}'.format(", "sampled_x + quadratic_c + noise return sampled_x, sampled_y class LeastSquares: def __init__(self, x,", "cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T @", "np.linalg.solve(J.T @ J, -J.T @ r) self.theta += delta if np.linalg.norm(delta) < 1e-8:", "= quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c +", "= self.residual() return r.T @ r def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for", "def least_squares_solve(self): for i in range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J =", "cost: {}'.format( i, self.cost())) break return self.theta def main(): x_data, y_data = generate_quadratic_data()", "+ c p1 = plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0],", "2 * 
np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y = quadratic_a * sampled_x", "__init__(self, x, y): self.x = x self.y = y self.data_mat = np.vstack( [self.x", "= x self.y = y self.data_mat = np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T", "np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def residual(self):", "a * x * x + b * x + c p1 =", "x + c p1 = plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y, 'g')", "'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data points vs Fitted curve') plt.show()", "= theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a * x * x", "def main(): x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve()", "y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x = np.linspace(-12,", "* x + c p1 = plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y,", "theta.transpose()) pred_y = a * x * x + b * x +", "x = np.linspace(-12, 12., 100) a, b, c = theta print('fitted coefficient (a,b,c):',", "y self.data_mat = np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0,", "self.y return r def cost(self): r = self.residual() return r.T @ r def", "def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2. quadratic_c = 1. num_data =", "import matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2. 
quadratic_c", "+ b * x + c p1 = plt.plot(x_data, y_data, '*r') p2 =", "noise return sampled_x, sampled_y class LeastSquares: def __init__(self, x, y): self.x = x", "x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x =", "- self.y return r def cost(self): r = self.residual() return r.T @ r", "np.linspace(-12, 12., 100) a, b, c = theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y", "+= delta if np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost: {}'.format( i, self.cost()))", "def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i in range(10): print('iteration: {} cost:", "r.T @ r def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i in range(10):", "np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost: {}'.format( i, self.cost())) break return self.theta", "0, 0.]) def residual(self): pred_y = self.data_mat @ self.theta r = pred_y -", "np.linspace(-10, 10., num_data) sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b *", "class LeastSquares: def __init__(self, x, y): self.x = x self.y = y self.data_mat", "delta if np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost: {}'.format( i, self.cost())) break", "LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x = np.linspace(-12, 12., 100) a, b, c", "p1 = plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled", "self.data_mat def least_squares_solve(self): for i in range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J", "sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c", "iteration: {} cost: {}'.format( i, self.cost())) break return self.theta def main(): x_data, y_data", "theta = solver.least_squares_solve() x = np.linspace(-12, 12., 100) a, b, c = theta", "sampled_x, sampled_y class LeastSquares: def 
__init__(self, x, y): self.x = x self.y =", "self.cost())) break return self.theta def main(): x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data,", "self.theta = np.array([0, 0, 0.]) def residual(self): pred_y = self.data_mat @ self.theta r", "self.theta += delta if np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost: {}'.format( i,", "as np import matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b =", "return sampled_x, sampled_y class LeastSquares: def __init__(self, x, y): self.x = x self.y", "num_data = 30 noise = 2 * np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data)", "y): self.x = x self.y = y self.data_mat = np.vstack( [self.x * self.x,", "2.4 quadratic_b = -2. quadratic_c = 1. num_data = 30 noise = 2", "self.data_mat @ self.theta r = pred_y - self.y return r def cost(self): r", "{}'.format(i, self.cost())) J = self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T @ J,", "least_squares_solve(self): for i in range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J = self.compute_jacobian()", "1. 
num_data = 30 noise = 2 * np.random.randn(num_data) sampled_x = np.linspace(-10, 10.,", "a, b, c = theta print('fitted coefficient (a,b,c):', theta.transpose()) pred_y = a *", "@ r def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i in range(10): print('iteration:", "= LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x = np.linspace(-12, 12., 100) a, b,", "'*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data", "< 1e-8: print('converged iteration: {} cost: {}'.format( i, self.cost())) break return self.theta def", "print('iteration: {} cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r = self.residual() delta =", "def cost(self): r = self.residual() return r.T @ r def compute_jacobian(self): return self.data_mat", "r def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i in range(10): print('iteration: {}", "noise = 2 * np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y = quadratic_a", "quadratic_a = 2.4 quadratic_b = -2. quadratic_c = 1. 
num_data = 30 noise", "= self.residual() delta = np.linalg.solve(J.T @ J, -J.T @ r) self.theta += delta", "* x * x + b * x + c p1 = plt.plot(x_data,", "quadratic_c + noise return sampled_x, sampled_y class LeastSquares: def __init__(self, x, y): self.x", "break return self.theta def main(): x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data)", "= np.linspace(-12, 12., 100) a, b, c = theta print('fitted coefficient (a,b,c):', theta.transpose())", "main(): x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x", "numpy as np import matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b", "30 noise = 2 * np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y =", "sampled_x + quadratic_b * sampled_x + quadratic_c + noise return sampled_x, sampled_y class", "import numpy as np import matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a = 2.4", "= generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta = solver.least_squares_solve() x = np.linspace(-12, 12.,", "pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function')) plt.title('Data points vs Fitted curve')", "sampled_x = np.linspace(-10, 10., num_data) sampled_y = quadratic_a * sampled_x * sampled_x +", "self.theta def main(): x_data, y_data = generate_quadratic_data() solver = LeastSquares(x_data, y_data) theta =", "= np.linalg.solve(J.T @ J, -J.T @ r) self.theta += delta if np.linalg.norm(delta) <", "= self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T @ J, -J.T @ r)", "<gh_stars>1-10 import numpy as np import matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a =", "r = pred_y - self.y return r def cost(self): r = self.residual() return", "@ r) self.theta += delta if np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost:", "= np.array([0, 0, 0.]) def residual(self): pred_y = 
self.data_mat @ self.theta r =", "J = self.compute_jacobian() r = self.residual() delta = np.linalg.solve(J.T @ J, -J.T @", "def __init__(self, x, y): self.x = x self.y = y self.data_mat = np.vstack(", "in range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r = self.residual()", "range(10): print('iteration: {} cost: {}'.format(i, self.cost())) J = self.compute_jacobian() r = self.residual() delta", "r = self.residual() delta = np.linalg.solve(J.T @ J, -J.T @ r) self.theta +=", "{} cost: {}'.format( i, self.cost())) break return self.theta def main(): x_data, y_data =", "quadratic_c = 1. num_data = 30 noise = 2 * np.random.randn(num_data) sampled_x =", "if np.linalg.norm(delta) < 1e-8: print('converged iteration: {} cost: {}'.format( i, self.cost())) break return", "self.residual() return r.T @ r def compute_jacobian(self): return self.data_mat def least_squares_solve(self): for i", "as plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2. quadratic_c = 1.", "= plt.plot(x_data, y_data, '*r') p2 = plt.plot(x, pred_y, 'g') plt.legend((p1[0], p2[0]), ('sampled data',", "r def cost(self): r = self.residual() return r.T @ r def compute_jacobian(self): return", "np import matplotlib.pyplot as plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2.", "i, self.cost())) break return self.theta def main(): x_data, y_data = generate_quadratic_data() solver =", "self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0, 0, 0.]) def residual(self): pred_y = self.data_mat", "-2. quadratic_c = 1. num_data = 30 noise = 2 * np.random.randn(num_data) sampled_x", "= 1. 
num_data = 30 noise = 2 * np.random.randn(num_data) sampled_x = np.linspace(-10,", "+ quadratic_c + noise return sampled_x, sampled_y class LeastSquares: def __init__(self, x, y):", "self.residual() delta = np.linalg.solve(J.T @ J, -J.T @ r) self.theta += delta if", "(a,b,c):', theta.transpose()) pred_y = a * x * x + b * x", "J, -J.T @ r) self.theta += delta if np.linalg.norm(delta) < 1e-8: print('converged iteration:", "-J.T @ r) self.theta += delta if np.linalg.norm(delta) < 1e-8: print('converged iteration: {}", "* np.random.randn(num_data) sampled_x = np.linspace(-10, 10., num_data) sampled_y = quadratic_a * sampled_x *", "0.]) def residual(self): pred_y = self.data_mat @ self.theta r = pred_y - self.y", "1e-8: print('converged iteration: {} cost: {}'.format( i, self.cost())) break return self.theta def main():", "= solver.least_squares_solve() x = np.linspace(-12, 12., 100) a, b, c = theta print('fitted", "return self.data_mat def least_squares_solve(self): for i in range(10): print('iteration: {} cost: {}'.format(i, self.cost()))", "= y self.data_mat = np.vstack( [self.x * self.x, self.x, np.ones_like(self.x)]).T self.theta = np.array([0,", "delta = np.linalg.solve(J.T @ J, -J.T @ r) self.theta += delta if np.linalg.norm(delta)", "plt def generate_quadratic_data(): quadratic_a = 2.4 quadratic_b = -2. quadratic_c = 1. num_data" ]
[ "BotAction. # noqa: E501 :type action_type: str :param strategic_category: The strategic_category of this", "description: The description of this BotAction. # noqa: E501 :type description: str :param", "the action_type of this BotAction. :return: The action_type of this BotAction. :rtype: str", "description: str): \"\"\"Sets the description of this BotAction. :param description: The description of", "ValueError(\"Invalid value for `action_type`, must not be `None`\") # noqa: E501 self._action_type =", "return self._description @description.setter def description(self, description: str): \"\"\"Sets the description of this BotAction.", "the strategic_category of this BotAction. :param strategic_category: The strategic_category of this BotAction. :type", "action_type: The action_type of this BotAction. # noqa: E501 :type action_type: str :param", "data: Dict=None): # noqa: E501 \"\"\"BotAction - a model defined in Swagger :param", "{ 'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map = {", "- a model defined in Swagger :param description: The description of this BotAction.", "self.attribute_map = { 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description", ":return: The action_type of this BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter def", "'BotAction': \"\"\"Returns the dict as a model :param dikt: A dict. :type: dict", ":param description: The description of this BotAction. :type description: str \"\"\" self._description =", "this BotAction. 
:rtype: Dict \"\"\" return self._data @data.setter def data(self, data: Dict): \"\"\"Sets", "action_type: str \"\"\" if action_type is None: raise ValueError(\"Invalid value for `action_type`, must", "absolute_import from datetime import date, datetime # noqa: F401 from typing import List,", "import util class BotAction(Model): \"\"\"NOTE: This class is auto generated by the swagger", "\"\"\"Gets the description of this BotAction. :return: The description of this BotAction. :rtype:", "Dict \"\"\" return self._data @data.setter def data(self, data: Dict): \"\"\"Sets the data of", "from typing import List, Dict # noqa: F401 from rlbot_action_server.models.base_model_ import Model from", "strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this BotAction. :param strategic_category: The strategic_category", "BotAction(Model): \"\"\"NOTE: This class is auto generated by the swagger code generator program.", "of this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory):", "\"\"\"Gets the data of this BotAction. :return: The data of this BotAction. :rtype:", "description of this BotAction. :rtype: str \"\"\" return self._description @description.setter def description(self, description:", ":rtype: str \"\"\" return self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets the action_type", "str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction - a", "data: The data of this BotAction. :type data: Dict \"\"\" self._data = data", ":param data: The data of this BotAction. # noqa: E501 :type data: Dict", "= data @classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict as a", "-> str: \"\"\"Gets the action_type of this BotAction. 
:return: The action_type of this", "self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this BotAction. :param", "def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of this BotAction. :return: The strategic_category", "The description of this BotAction. :type description: str \"\"\" self._description = description @property", "the strategic_category of this BotAction. :return: The strategic_category of this BotAction. :rtype: StrategicCategory", "None: raise ValueError(\"Invalid value for `action_type`, must not be `None`\") # noqa: E501", ":rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category", "this BotAction. :return: The action_type of this BotAction. :rtype: str \"\"\" return self._action_type", "description of this BotAction. :param description: The description of this BotAction. :type description:", "`None`\") # noqa: E501 self._action_type = action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets", "of this BotAction. :rtype: str \"\"\" return self._description @description.setter def description(self, description: str):", "# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime #", "of this BotAction. :type description: str \"\"\" self._description = description @property def action_type(self)", "from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict as a model :param dikt: A", "this BotAction. :param data: The data of this BotAction. :type data: Dict \"\"\"", "strategic_category: The strategic_category of this BotAction. # noqa: E501 :type strategic_category: StrategicCategory :param", "of this BotAction. :return: The data of this BotAction. 
:rtype: Dict \"\"\" return", ":param action_type: The action_type of this BotAction. :type action_type: str \"\"\" if action_type", "# noqa: E501 :type data: Dict \"\"\" self.swagger_types = { 'description': str, 'action_type':", "E501 self._action_type = action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of", "of this BotAction. :param strategic_category: The strategic_category of this BotAction. :type strategic_category: StrategicCategory", "datetime # noqa: F401 from typing import List, Dict # noqa: F401 from", "noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def description(self) -> str:", "= strategic_category @property def data(self) -> Dict: \"\"\"Gets the data of this BotAction.", "\"\"\"NOTE: This class is auto generated by the swagger code generator program. Do", "must not be `None`\") # noqa: E501 self._action_type = action_type @property def strategic_category(self)", "'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map = { 'description':", ":param dikt: A dict. :type: dict :return: The BotAction of this BotAction. #", "this BotAction. # noqa: E501 :type data: Dict \"\"\" self.swagger_types = { 'description':", "def data(self) -> Dict: \"\"\"Gets the data of this BotAction. :return: The data", "= action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of this BotAction.", "strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this BotAction. 
:param strategic_category: The strategic_category of", "'strategicCategory', 'data': 'data' } self._description = description self._action_type = action_type self._strategic_category = strategic_category", "# noqa: E501 self._action_type = action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the", "str \"\"\" if action_type is None: raise ValueError(\"Invalid value for `action_type`, must not", "not edit the class manually. \"\"\" def __init__(self, description: str=None, action_type: str=None, strategic_category:", "str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map = { 'description': 'description', 'action_type': 'actionType',", "Swagger :param description: The description of this BotAction. # noqa: E501 :type description:", "this BotAction. :return: The data of this BotAction. :rtype: Dict \"\"\" return self._data", "@property def description(self) -> str: \"\"\"Gets the description of this BotAction. :return: The", "data of this BotAction. :param data: The data of this BotAction. :type data:", "self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets the action_type of this BotAction. :param", "StrategicCategory :param data: The data of this BotAction. # noqa: E501 :type data:", "\"\"\" return util.deserialize_model(dikt, cls) @property def description(self) -> str: \"\"\"Gets the description of", "str \"\"\" return self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets the action_type of", "of this BotAction. :return: The action_type of this BotAction. :rtype: str \"\"\" return", ":rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def description(self) -> str: \"\"\"Gets the", "action_type(self, action_type: str): \"\"\"Sets the action_type of this BotAction. 
:param action_type: The action_type", "value for `action_type`, must not be `None`\") # noqa: E501 self._action_type = action_type", "\"\"\"Gets the strategic_category of this BotAction. :return: The strategic_category of this BotAction. :rtype:", "noqa: E501 :type data: Dict \"\"\" self.swagger_types = { 'description': str, 'action_type': str,", "StrategicCategory \"\"\" self._strategic_category = strategic_category @property def data(self) -> Dict: \"\"\"Gets the data", "= { 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description =", "BotAction. :return: The action_type of this BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter", "of this BotAction. :return: The description of this BotAction. :rtype: str \"\"\" return", "str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map = { 'description': 'description',", "cls) @property def description(self) -> str: \"\"\"Gets the description of this BotAction. :return:", "Dict=None): # noqa: E501 \"\"\"BotAction - a model defined in Swagger :param description:", "self._strategic_category = strategic_category self._data = data @classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns", "of this BotAction. :param action_type: The action_type of this BotAction. 
:type action_type: str", "= description self._action_type = action_type self._strategic_category = strategic_category self._data = data @classmethod def", "= action_type self._strategic_category = strategic_category self._data = data @classmethod def from_dict(cls, dikt) ->", "# noqa: F401 from typing import List, Dict # noqa: F401 from rlbot_action_server.models.base_model_", "action_type is None: raise ValueError(\"Invalid value for `action_type`, must not be `None`\") #", "self._action_type = action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of this", "The strategic_category of this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self,", "this BotAction. :return: The description of this BotAction. :rtype: str \"\"\" return self._description", ":param data: The data of this BotAction. :type data: Dict \"\"\" self._data =", "this BotAction. :param description: The description of this BotAction. :type description: str \"\"\"", "rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE:", "this BotAction. :param action_type: The action_type of this BotAction. :type action_type: str \"\"\"", "dict. :type: dict :return: The BotAction of this BotAction. # noqa: E501 :rtype:", "return util.deserialize_model(dikt, cls) @property def description(self) -> str: \"\"\"Gets the description of this", "datetime import date, datetime # noqa: F401 from typing import List, Dict #", ":type strategic_category: StrategicCategory :param data: The data of this BotAction. # noqa: E501", "dikt: A dict. :type: dict :return: The BotAction of this BotAction. 
# noqa:", "action_type self._strategic_category = strategic_category self._data = data @classmethod def from_dict(cls, dikt) -> 'BotAction':", "@property def action_type(self) -> str: \"\"\"Gets the action_type of this BotAction. :return: The", "raise ValueError(\"Invalid value for `action_type`, must not be `None`\") # noqa: E501 self._action_type", "description of this BotAction. # noqa: E501 :type description: str :param action_type: The", "return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this BotAction.", "noqa: F401,E501 from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This class is auto", ":type data: Dict \"\"\" self.swagger_types = { 'description': str, 'action_type': str, 'strategic_category': StrategicCategory,", "'data': Dict } self.attribute_map = { 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data':", "description: str :param action_type: The action_type of this BotAction. # noqa: E501 :type", "\"\"\" self.swagger_types = { 'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict }", "noqa: E501 \"\"\"BotAction - a model defined in Swagger :param description: The description", "Do not edit the class manually. \"\"\" def __init__(self, description: str=None, action_type: str=None,", "action_type of this BotAction. :type action_type: str \"\"\" if action_type is None: raise", "\"\"\"Gets the action_type of this BotAction. :return: The action_type of this BotAction. :rtype:", "strategic_category of this BotAction. :param strategic_category: The strategic_category of this BotAction. :type strategic_category:", "this BotAction. # noqa: E501 :type action_type: str :param strategic_category: The strategic_category of", "str: \"\"\"Gets the action_type of this BotAction. 
:return: The action_type of this BotAction.", "the class manually. \"\"\" def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data:", "self._data @data.setter def data(self, data: Dict): \"\"\"Sets the data of this BotAction. :param", "rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from rlbot_action_server import", "\"\"\"Sets the description of this BotAction. :param description: The description of this BotAction.", "StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction - a model defined in Swagger", "\"\"\" if action_type is None: raise ValueError(\"Invalid value for `action_type`, must not be", "List, Dict # noqa: F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory", "from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from rlbot_action_server", "the description of this BotAction. :param description: The description of this BotAction. :type", "BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def description(self) -> str: \"\"\"Gets the description", "BotAction. :param strategic_category: The strategic_category of this BotAction. :type strategic_category: StrategicCategory \"\"\" self._strategic_category", "Dict): \"\"\"Sets the data of this BotAction. :param data: The data of this", "# noqa: E501 :type action_type: str :param strategic_category: The strategic_category of this BotAction.", "the dict as a model :param dikt: A dict. 
:type: dict :return: The", "noqa: F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501", ":type action_type: str \"\"\" if action_type is None: raise ValueError(\"Invalid value for `action_type`,", "# noqa: F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa:", "self._data = data @classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict as", "data: Dict \"\"\" self.swagger_types = { 'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data':", "def action_type(self) -> str: \"\"\"Gets the action_type of this BotAction. :return: The action_type", "this BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets", "strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction - a model defined in", "util.deserialize_model(dikt, cls) @property def description(self) -> str: \"\"\"Gets the description of this BotAction.", ":type description: str \"\"\" self._description = description @property def action_type(self) -> str: \"\"\"Gets", "self._description = description @property def action_type(self) -> str: \"\"\"Gets the action_type of this", "the data of this BotAction. :return: The data of this BotAction. :rtype: Dict", "BotAction. :return: The strategic_category of this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter", "date, datetime # noqa: F401 from typing import List, Dict # noqa: F401", "from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This class is auto generated by", "noqa: E501 :type description: str :param action_type: The action_type of this BotAction. #", "by the swagger code generator program. Do not edit the class manually. 
\"\"\"", "of this BotAction. # noqa: E501 :type strategic_category: StrategicCategory :param data: The data", "import List, Dict # noqa: F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import", "description @property def action_type(self) -> str: \"\"\"Gets the action_type of this BotAction. :return:", ":return: The strategic_category of this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def", "BotAction. :param description: The description of this BotAction. :type description: str \"\"\" self._description", "BotAction. :rtype: str \"\"\" return self._description @description.setter def description(self, description: str): \"\"\"Sets the", "\"\"\" self._description = description @property def action_type(self) -> str: \"\"\"Gets the action_type of", "def action_type(self, action_type: str): \"\"\"Sets the action_type of this BotAction. :param action_type: The", "action_type: str): \"\"\"Sets the action_type of this BotAction. :param action_type: The action_type of", "BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the", "def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict as a model :param dikt:", "self._action_type = action_type self._strategic_category = strategic_category self._data = data @classmethod def from_dict(cls, dikt)", "BotAction. # noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def description(self)", "'strategic_category': 'strategicCategory', 'data': 'data' } self._description = description self._action_type = action_type self._strategic_category =", "BotAction. 
# noqa: E501 :type description: str :param action_type: The action_type of this", "__init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction", "of this BotAction. :param data: The data of this BotAction. :type data: Dict", "str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction - a model defined", "this BotAction. :param strategic_category: The strategic_category of this BotAction. :type strategic_category: StrategicCategory \"\"\"", "code generator program. Do not edit the class manually. \"\"\" def __init__(self, description:", "# noqa: F401,E501 from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This class is", "The description of this BotAction. :rtype: str \"\"\" return self._description @description.setter def description(self,", "'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map = { 'description': 'description', 'action_type': 'actionType', 'strategic_category':", "of this BotAction. # noqa: E501 :type description: str :param action_type: The action_type", "the description of this BotAction. :return: The description of this BotAction. :rtype: str", "@property def data(self) -> Dict: \"\"\"Gets the data of this BotAction. :return: The", "be `None`\") # noqa: E501 self._action_type = action_type @property def strategic_category(self) -> StrategicCategory:", "description of this BotAction. :type description: str \"\"\" self._description = description @property def", "= { 'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map =", "model defined in Swagger :param description: The description of this BotAction. # noqa:", "BotAction. :type action_type: str \"\"\" if action_type is None: raise ValueError(\"Invalid value for", "edit the class manually. 
\"\"\" def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None,", "Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from rlbot_action_server import util class", "action_type(self) -> str: \"\"\"Gets the action_type of this BotAction. :return: The action_type of", "\"\"\"Sets the action_type of this BotAction. :param action_type: The action_type of this BotAction.", ":type description: str :param action_type: The action_type of this BotAction. # noqa: E501", "noqa: F401 from typing import List, Dict # noqa: F401 from rlbot_action_server.models.base_model_ import", "this BotAction. # noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def", "\"\"\" self._strategic_category = strategic_category @property def data(self) -> Dict: \"\"\"Gets the data of", "this BotAction. :return: The strategic_category of this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category", "import StrategicCategory # noqa: F401,E501 from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This", "# noqa: E501 \"\"\"BotAction - a model defined in Swagger :param description: The", "'data': 'data' } self._description = description self._action_type = action_type self._strategic_category = strategic_category self._data", ":param strategic_category: The strategic_category of this BotAction. # noqa: E501 :type strategic_category: StrategicCategory", "str \"\"\" self._description = description @property def action_type(self) -> str: \"\"\"Gets the action_type", "strategic_category of this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category:", "BotAction. # noqa: E501 :type strategic_category: StrategicCategory :param data: The data of this", "E501 :type strategic_category: StrategicCategory :param data: The data of this BotAction. 
# noqa:", "# noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def description(self) ->", "of this BotAction. # noqa: E501 :type action_type: str :param strategic_category: The strategic_category", ":type action_type: str :param strategic_category: The strategic_category of this BotAction. # noqa: E501", "action_type of this BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter def action_type(self, action_type:", "action_type of this BotAction. :param action_type: The action_type of this BotAction. :type action_type:", "generated by the swagger code generator program. Do not edit the class manually.", "E501 :type data: Dict \"\"\" self.swagger_types = { 'description': str, 'action_type': str, 'strategic_category':", "this BotAction. :rtype: StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets", "swagger code generator program. Do not edit the class manually. \"\"\" def __init__(self,", "@data.setter def data(self, data: Dict): \"\"\"Sets the data of this BotAction. :param data:", "str: \"\"\"Gets the description of this BotAction. :return: The description of this BotAction.", "import date, datetime # noqa: F401 from typing import List, Dict # noqa:", "strategic_category: The strategic_category of this BotAction. :type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category", "return self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets the action_type of this BotAction.", "def description(self, description: str): \"\"\"Sets the description of this BotAction. :param description: The", "@strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this BotAction. :param strategic_category:", "strategic_category of this BotAction. 
# noqa: E501 :type strategic_category: StrategicCategory :param data: The", "self._description = description self._action_type = action_type self._strategic_category = strategic_category self._data = data @classmethod", "E501 :type description: str :param action_type: The action_type of this BotAction. # noqa:", "if action_type is None: raise ValueError(\"Invalid value for `action_type`, must not be `None`\")", "-> 'BotAction': \"\"\"Returns the dict as a model :param dikt: A dict. :type:", "E501 \"\"\"BotAction - a model defined in Swagger :param description: The description of", "this BotAction. # noqa: E501 :type description: str :param action_type: The action_type of", "BotAction. :return: The description of this BotAction. :rtype: str \"\"\" return self._description @description.setter", "dict :return: The BotAction of this BotAction. # noqa: E501 :rtype: BotAction \"\"\"", "description(self, description: str): \"\"\"Sets the description of this BotAction. :param description: The description", "StrategicCategory): \"\"\"Sets the strategic_category of this BotAction. :param strategic_category: The strategic_category of this", "str \"\"\" return self._description @description.setter def description(self, description: str): \"\"\"Sets the description of", "program. Do not edit the class manually. \"\"\" def __init__(self, description: str=None, action_type:", "This class is auto generated by the swagger code generator program. Do not", "\"\"\"Returns the dict as a model :param dikt: A dict. :type: dict :return:", ":rtype: Dict \"\"\" return self._data @data.setter def data(self, data: Dict): \"\"\"Sets the data", "as a model :param dikt: A dict. :type: dict :return: The BotAction of", "\"\"\"BotAction - a model defined in Swagger :param description: The description of this", "data(self) -> Dict: \"\"\"Gets the data of this BotAction. :return: The data of", "strategic_category of this BotAction. 
:type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property def", "class is auto generated by the swagger code generator program. Do not edit", "strategic_category: StrategicCategory :param data: The data of this BotAction. # noqa: E501 :type", "= strategic_category self._data = data @classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the", "this BotAction. :type description: str \"\"\" self._description = description @property def action_type(self) ->", "BotAction. :return: The data of this BotAction. :rtype: Dict \"\"\" return self._data @data.setter", "import absolute_import from datetime import date, datetime # noqa: F401 from typing import", "@description.setter def description(self, description: str): \"\"\"Sets the description of this BotAction. :param description:", "'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description = description self._action_type = action_type self._strategic_category", "F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from", "\"\"\" return self._description @description.setter def description(self, description: str): \"\"\"Sets the description of this", "str): \"\"\"Sets the description of this BotAction. :param description: The description of this", "Dict } self.attribute_map = { 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data'", "this BotAction. :type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property def data(self) ->", "Dict # noqa: F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category import StrategicCategory #", "in Swagger :param description: The description of this BotAction. # noqa: E501 :type", "generator program. Do not edit the class manually. 
\"\"\" def __init__(self, description: str=None,", "action_type: str :param strategic_category: The strategic_category of this BotAction. # noqa: E501 :type", "def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this BotAction. :param strategic_category: The", "of this BotAction. # noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property", "the data of this BotAction. :param data: The data of this BotAction. :type", "The data of this BotAction. # noqa: E501 :type data: Dict \"\"\" self.swagger_types", "BotAction. :param data: The data of this BotAction. :type data: Dict \"\"\" self._data", "__future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing", "data of this BotAction. # noqa: E501 :type data: Dict \"\"\" self.swagger_types =", "StrategicCategory, 'data': Dict } self.attribute_map = { 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory',", "from datetime import date, datetime # noqa: F401 from typing import List, Dict", "manually. \"\"\" def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): #", "this BotAction. :type action_type: str \"\"\" if action_type is None: raise ValueError(\"Invalid value", "strategic_category @property def data(self) -> Dict: \"\"\"Gets the data of this BotAction. :return:", "this BotAction. :rtype: str \"\"\" return self._description @description.setter def description(self, description: str): \"\"\"Sets", "\"\"\"Sets the strategic_category of this BotAction. 
:param strategic_category: The strategic_category of this BotAction.", "description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction -", "# noqa: E501 :type description: str :param action_type: The action_type of this BotAction.", ":param description: The description of this BotAction. # noqa: E501 :type description: str", "'data' } self._description = description self._action_type = action_type self._strategic_category = strategic_category self._data =", ":param strategic_category: The strategic_category of this BotAction. :type strategic_category: StrategicCategory \"\"\" self._strategic_category =", "The description of this BotAction. # noqa: E501 :type description: str :param action_type:", "of this BotAction. :return: The strategic_category of this BotAction. :rtype: StrategicCategory \"\"\" return", "StrategicCategory: \"\"\"Gets the strategic_category of this BotAction. :return: The strategic_category of this BotAction.", "of this BotAction. :type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property def data(self)", "of this BotAction. :type action_type: str \"\"\" if action_type is None: raise ValueError(\"Invalid", "utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401", "BotAction. # noqa: E501 :type data: Dict \"\"\" self.swagger_types = { 'description': str,", "def description(self) -> str: \"\"\"Gets the description of this BotAction. :return: The description", "str :param action_type: The action_type of this BotAction. # noqa: E501 :type action_type:", "the action_type of this BotAction. :param action_type: The action_type of this BotAction. :type", "strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of this BotAction. :return: The strategic_category of", "description: The description of this BotAction. 
:type description: str \"\"\" self._description = description", "self._description @description.setter def description(self, description: str): \"\"\"Sets the description of this BotAction. :param", "class BotAction(Model): \"\"\"NOTE: This class is auto generated by the swagger code generator", "-> StrategicCategory: \"\"\"Gets the strategic_category of this BotAction. :return: The strategic_category of this", "return self._data @data.setter def data(self, data: Dict): \"\"\"Sets the data of this BotAction.", ":return: The BotAction of this BotAction. # noqa: E501 :rtype: BotAction \"\"\" return", "BotAction. :rtype: Dict \"\"\" return self._data @data.setter def data(self, data: Dict): \"\"\"Sets the", "BotAction. :param action_type: The action_type of this BotAction. :type action_type: str \"\"\" if", "The action_type of this BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter def action_type(self,", "is None: raise ValueError(\"Invalid value for `action_type`, must not be `None`\") # noqa:", "-> Dict: \"\"\"Gets the data of this BotAction. :return: The data of this", "self.swagger_types = { 'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map", "Dict: \"\"\"Gets the data of this BotAction. :return: The data of this BotAction.", "noqa: E501 :type strategic_category: StrategicCategory :param data: The data of this BotAction. #", "'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict } self.attribute_map = { 'description': 'description', 'action_type':", "data of this BotAction. :return: The data of this BotAction. :rtype: Dict \"\"\"", "str): \"\"\"Sets the action_type of this BotAction. :param action_type: The action_type of this", "for `action_type`, must not be `None`\") # noqa: E501 self._action_type = action_type @property", "of this BotAction. 
:rtype: Dict \"\"\" return self._data @data.setter def data(self, data: Dict):", "def data(self, data: Dict): \"\"\"Sets the data of this BotAction. :param data: The", ":type: dict :return: The BotAction of this BotAction. # noqa: E501 :rtype: BotAction", "\"\"\"Sets the data of this BotAction. :param data: The data of this BotAction.", "{ 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description = description", "\"\"\" return self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets the action_type of this", "strategic_category self._data = data @classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict", "of this BotAction. :param description: The description of this BotAction. :type description: str", "dikt) -> 'BotAction': \"\"\"Returns the dict as a model :param dikt: A dict.", "A dict. :type: dict :return: The BotAction of this BotAction. # noqa: E501", "action_type: The action_type of this BotAction. :type action_type: str \"\"\" if action_type is", "The data of this BotAction. :rtype: Dict \"\"\" return self._data @data.setter def data(self,", "model :param dikt: A dict. :type: dict :return: The BotAction of this BotAction.", "data: The data of this BotAction. # noqa: E501 :type data: Dict \"\"\"", "class manually. \"\"\" def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None):", "description of this BotAction. :return: The description of this BotAction. :rtype: str \"\"\"", "BotAction. 
:type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property def data(self) -> Dict:", "\"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of this", "def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501", "of this BotAction. # noqa: E501 :type data: Dict \"\"\" self.swagger_types = {", "data(self, data: Dict): \"\"\"Sets the data of this BotAction. :param data: The data", "coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa:", "E501 :type action_type: str :param strategic_category: The strategic_category of this BotAction. # noqa:", "data @classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict as a model", "a model defined in Swagger :param description: The description of this BotAction. #", "import Model from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from rlbot_action_server import util", "@property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of this BotAction. :return: The", "@classmethod def from_dict(cls, dikt) -> 'BotAction': \"\"\"Returns the dict as a model :param", "-> str: \"\"\"Gets the description of this BotAction. :return: The description of this", ":return: The data of this BotAction. :rtype: Dict \"\"\" return self._data @data.setter def", "typing import List, Dict # noqa: F401 from rlbot_action_server.models.base_model_ import Model from rlbot_action_server.models.strategic_category", "the swagger code generator program. Do not edit the class manually. 
\"\"\" def", "action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501 \"\"\"BotAction - a model", "util class BotAction(Model): \"\"\"NOTE: This class is auto generated by the swagger code", "action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category of this BotAction. :return:", "'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description = description self._action_type = action_type", "is auto generated by the swagger code generator program. Do not edit the", "noqa: E501 self._action_type = action_type @property def strategic_category(self) -> StrategicCategory: \"\"\"Gets the strategic_category", "noqa: E501 :type action_type: str :param strategic_category: The strategic_category of this BotAction. #", "The strategic_category of this BotAction. :type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property", "action_type of this BotAction. # noqa: E501 :type action_type: str :param strategic_category: The", "of this BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter def action_type(self, action_type: str):", "str :param strategic_category: The strategic_category of this BotAction. # noqa: E501 :type strategic_category:", "description self._action_type = action_type self._strategic_category = strategic_category self._data = data @classmethod def from_dict(cls,", "not be `None`\") # noqa: E501 self._action_type = action_type @property def strategic_category(self) ->", "strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property def data(self) -> Dict: \"\"\"Gets the", "defined in Swagger :param description: The description of this BotAction. 
# noqa: E501", ":type strategic_category: StrategicCategory \"\"\" self._strategic_category = strategic_category @property def data(self) -> Dict: \"\"\"Gets", "E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls) @property def description(self) -> str: \"\"\"Gets", "self._strategic_category = strategic_category @property def data(self) -> Dict: \"\"\"Gets the data of this", "F401 from typing import List, Dict # noqa: F401 from rlbot_action_server.models.base_model_ import Model", "description: str \"\"\" self._description = description @property def action_type(self) -> str: \"\"\"Gets the", "auto generated by the swagger code generator program. Do not edit the class", "\"\"\" return self._data @data.setter def data(self, data: Dict): \"\"\"Sets the data of this", "StrategicCategory # noqa: F401,E501 from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This class", "BotAction of this BotAction. # noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt, cls)", ":param action_type: The action_type of this BotAction. # noqa: E501 :type action_type: str", "The strategic_category of this BotAction. # noqa: E501 :type strategic_category: StrategicCategory :param data:", "@action_type.setter def action_type(self, action_type: str): \"\"\"Sets the action_type of this BotAction. :param action_type:", "a model :param dikt: A dict. :type: dict :return: The BotAction of this", "rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This class is auto generated by the", "The BotAction of this BotAction. 
# noqa: E501 :rtype: BotAction \"\"\" return util.deserialize_model(dikt,", "'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description = description self._action_type", "Dict \"\"\" self.swagger_types = { 'description': str, 'action_type': str, 'strategic_category': StrategicCategory, 'data': Dict", "description(self) -> str: \"\"\"Gets the description of this BotAction. :return: The description of", "BotAction. :type description: str \"\"\" self._description = description @property def action_type(self) -> str:", "} self._description = description self._action_type = action_type self._strategic_category = strategic_category self._data = data", "The action_type of this BotAction. :type action_type: str \"\"\" if action_type is None:", ":return: The description of this BotAction. :rtype: str \"\"\" return self._description @description.setter def", "this BotAction. # noqa: E501 :type strategic_category: StrategicCategory :param data: The data of", "dict as a model :param dikt: A dict. :type: dict :return: The BotAction", "F401,E501 from rlbot_action_server import util class BotAction(Model): \"\"\"NOTE: This class is auto generated", "} self.attribute_map = { 'description': 'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' }", ":rtype: str \"\"\" return self._description @description.setter def description(self, description: str): \"\"\"Sets the description", "<reponame>tarehart/RLBotTwitchBroker<gh_stars>1-10 # coding: utf-8 from __future__ import absolute_import from datetime import date, datetime", "from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from", "strategic_category of this BotAction. :return: The strategic_category of this BotAction. 
:rtype: StrategicCategory \"\"\"", "\"\"\" def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa:", "BotAction. :rtype: str \"\"\" return self._action_type @action_type.setter def action_type(self, action_type: str): \"\"\"Sets the", "data: Dict): \"\"\"Sets the data of this BotAction. :param data: The data of", "action_type of this BotAction. :return: The action_type of this BotAction. :rtype: str \"\"\"", "`action_type`, must not be `None`\") # noqa: E501 self._action_type = action_type @property def", "The action_type of this BotAction. # noqa: E501 :type action_type: str :param strategic_category:", "# noqa: E501 :type strategic_category: StrategicCategory :param data: The data of this BotAction.", "data of this BotAction. :rtype: Dict \"\"\" return self._data @data.setter def data(self, data:", "= description @property def action_type(self) -> str: \"\"\"Gets the action_type of this BotAction.", "'description', 'action_type': 'actionType', 'strategic_category': 'strategicCategory', 'data': 'data' } self._description = description self._action_type =", "StrategicCategory \"\"\" return self._strategic_category @strategic_category.setter def strategic_category(self, strategic_category: StrategicCategory): \"\"\"Sets the strategic_category of", "from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501 from rlbot_action_server import util class BotAction(Model):" ]
[ "bool = True): \"\"\" Make dummy ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end,", "info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100, smooth_f: str =", "elif smooth_f == 'max': df = df.rolling(smooth_n).max() elif smooth_f == 'median': df =", "import pandas as pd import numpy as np def df_info(df: pd.DataFrame, return_info=False, shape=True,", "a string to describe a df. \"\"\" info = info_prefix if shape: info", "info = info_prefix if shape: info = f'{info}Shape = {df.shape}' if cols: info", "in range(n_cols)]) df = pd.concat([time_range, data], axis=1) df = df.set_index('time') if smooth_n: if", "df. \"\"\" info = info_prefix if shape: info = f'{info}Shape = {df.shape}' if", "= True): \"\"\" Make dummy ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq),", "df = df.set_index('time') if smooth_n: if smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif", "return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a string to describe a df. 
\"\"\"", "print(info) if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int =", "time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n", "smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif smooth_f == 'min': df = df.rolling(smooth_n).min()", "df.set_index('time') if smooth_n: if smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif smooth_f ==", "import numpy as np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print", "info_prefix if shape: info = f'{info}Shape = {df.shape}' if cols: info = f'{info}", "return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100, smooth_f:", "elif smooth_f == 'min': df = df.rolling(smooth_n).min() elif smooth_f == 'max': df =", "string to describe a df. \"\"\" info = info_prefix if shape: info =", "np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a string to", ", Cols = {df.columns.tolist()}' print(info) if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s',", "f'{info} , Cols = {df.columns.tolist()}' print(info) if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02',", "smooth_f: str = 'mean', dropna: bool = True): \"\"\" Make dummy ts df.", "df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented ...') if dropna: df", "df = df.rolling(smooth_n).max() elif smooth_f == 'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'...", "\"\"\" Make dummy ts df. 
\"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data", "info = f'{info}Shape = {df.shape}' if cols: info = f'{info} , Cols =", "= {df.columns.tolist()}' print(info) if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n:", "smooth_f == 'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented ...')", "= {df.shape}' if cols: info = f'{info} , Cols = {df.columns.tolist()}' print(info) if", "for n in range(n_cols)]) df = pd.concat([time_range, data], axis=1) df = df.set_index('time') if", "pd.concat([time_range, data], axis=1) df = df.set_index('time') if smooth_n: if smooth_f == 'mean': df", "= info_prefix if shape: info = f'{info}Shape = {df.shape}' if cols: info =", "= pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in", "n_cols), columns=[f'col{n}' for n in range(n_cols)]) df = pd.concat([time_range, data], axis=1) df =", "= df.set_index('time') if smooth_n: if smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif smooth_f", "str = 'mean', dropna: bool = True): \"\"\" Make dummy ts df. 
\"\"\"", "= pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)]) df = pd.concat([time_range, data], axis=1)", "df.rolling(smooth_n).min() elif smooth_f == 'max': df = df.rolling(smooth_n).max() elif smooth_f == 'median': df", "shape: info = f'{info}Shape = {df.shape}' if cols: info = f'{info} , Cols", "\"\"\" info = info_prefix if shape: info = f'{info}Shape = {df.shape}' if cols:", "def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a string to describe", "columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)]) df = pd.concat([time_range,", "columns=[f'col{n}' for n in range(n_cols)]) df = pd.concat([time_range, data], axis=1) df = df.set_index('time')", "df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented ...') if dropna: df = df.dropna()", "pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a string to describe a df.", "pandas as pd import numpy as np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True,", "if shape: info = f'{info}Shape = {df.shape}' if cols: info = f'{info} ,", "{df.columns.tolist()}' print(info) if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int", "= 100, smooth_f: str = 'mean', dropna: bool = True): \"\"\" Make dummy", "= df.rolling(smooth_n).min() elif smooth_f == 'max': df = df.rolling(smooth_n).max() elif smooth_f == 'median':", "Cols = {df.columns.tolist()}' print(info) if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5,", "df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100, smooth_f: str = 'mean', dropna:", "Print a string to describe a df. 
\"\"\" info = info_prefix if shape:", "smooth_f == 'max': df = df.rolling(smooth_n).max() elif smooth_f == 'median': df = df.rolling(smooth_n).median()", "dropna: bool = True): \"\"\" Make dummy ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start,", "freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)]) df =", "True): \"\"\" Make dummy ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time'])", "100, smooth_f: str = 'mean', dropna: bool = True): \"\"\" Make dummy ts", "'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented ...') if dropna:", "info = f'{info} , Cols = {df.columns.tolist()}' print(info) if return_info: return info def", "= 'mean', dropna: bool = True): \"\"\" Make dummy ts df. \"\"\" time_range", "as pd import numpy as np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''):", "== 'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented ...') if", "= df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented ...') if dropna: df =", "'mean', dropna: bool = True): \"\"\" Make dummy ts df. \"\"\" time_range =", "= f'{info} , Cols = {df.columns.tolist()}' print(info) if return_info: return info def df_dummy_ts(start='2019-01-01',", "int = 100, smooth_f: str = 'mean', dropna: bool = True): \"\"\" Make", "pd import numpy as np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\"", "= df.rolling(smooth_n).mean() elif smooth_f == 'min': df = df.rolling(smooth_n).min() elif smooth_f == 'max':", "to describe a df. \"\"\" info = info_prefix if shape: info = f'{info}Shape", "cols=True, info_prefix=''): \"\"\" Print a string to describe a df. 
\"\"\" info =", "end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100, smooth_f: str = 'mean', dropna: bool", "== 'min': df = df.rolling(smooth_n).min() elif smooth_f == 'max': df = df.rolling(smooth_n).max() elif", "smooth_n: int = 100, smooth_f: str = 'mean', dropna: bool = True): \"\"\"", "data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)]) df = pd.concat([time_range, data],", "else: raise NotImplementedError(f'... {smooth_f} not implemented ...') if dropna: df = df.dropna() return", "axis=1) df = df.set_index('time') if smooth_n: if smooth_f == 'mean': df = df.rolling(smooth_n).mean()", "df = pd.concat([time_range, data], axis=1) df = df.set_index('time') if smooth_n: if smooth_f ==", "n in range(n_cols)]) df = pd.concat([time_range, data], axis=1) df = df.set_index('time') if smooth_n:", "describe a df. \"\"\" info = info_prefix if shape: info = f'{info}Shape =", "dummy ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range),", "if smooth_n: if smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif smooth_f == 'min':", "\"\"\" Print a string to describe a df. \"\"\" info = info_prefix if", "df = df.rolling(smooth_n).mean() elif smooth_f == 'min': df = df.rolling(smooth_n).min() elif smooth_f ==", "elif smooth_f == 'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f} not implemented", "= f'{info}Shape = {df.shape}' if cols: info = f'{info} , Cols = {df.columns.tolist()}'", "df.rolling(smooth_n).max() elif smooth_f == 'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... 
{smooth_f} not", "pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)]) df = pd.concat([time_range, data], axis=1) df", "== 'max': df = df.rolling(smooth_n).max() elif smooth_f == 'median': df = df.rolling(smooth_n).median() else:", "df.rolling(smooth_n).mean() elif smooth_f == 'min': df = df.rolling(smooth_n).min() elif smooth_f == 'max': df", "'min': df = df.rolling(smooth_n).min() elif smooth_f == 'max': df = df.rolling(smooth_n).max() elif smooth_f", "Make dummy ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data =", "if smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif smooth_f == 'min': df =", "\"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for", "range(n_cols)]) df = pd.concat([time_range, data], axis=1) df = df.set_index('time') if smooth_n: if smooth_f", "cols: info = f'{info} , Cols = {df.columns.tolist()}' print(info) if return_info: return info", "f'{info}Shape = {df.shape}' if cols: info = f'{info} , Cols = {df.columns.tolist()}' print(info)", "shape=True, cols=True, info_prefix=''): \"\"\" Print a string to describe a df. 
\"\"\" info", "freq='1s', n_cols=5, smooth_n: int = 100, smooth_f: str = 'mean', dropna: bool =", "if cols: info = f'{info} , Cols = {df.columns.tolist()}' print(info) if return_info: return", "return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100, smooth_f: str", "df = df.rolling(smooth_n).min() elif smooth_f == 'max': df = df.rolling(smooth_n).max() elif smooth_f ==", "pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)])", "data], axis=1) df = df.set_index('time') if smooth_n: if smooth_f == 'mean': df =", "smooth_n: if smooth_f == 'mean': df = df.rolling(smooth_n).mean() elif smooth_f == 'min': df", "df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}'", "def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100, smooth_f: str = 'mean',", "{df.shape}' if cols: info = f'{info} , Cols = {df.columns.tolist()}' print(info) if return_info:", "'max': df = df.rolling(smooth_n).max() elif smooth_f == 'median': df = df.rolling(smooth_n).median() else: raise", "'mean': df = df.rolling(smooth_n).mean() elif smooth_f == 'min': df = df.rolling(smooth_n).min() elif smooth_f", "a df. 
\"\"\" info = info_prefix if shape: info = f'{info}Shape = {df.shape}'", "as np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a string", "smooth_f == 'min': df = df.rolling(smooth_n).min() elif smooth_f == 'max': df = df.rolling(smooth_n).max()", "== 'mean': df = df.rolling(smooth_n).mean() elif smooth_f == 'min': df = df.rolling(smooth_n).min() elif", "= pd.concat([time_range, data], axis=1) df = df.set_index('time') if smooth_n: if smooth_f == 'mean':", "if return_info: return info def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5, smooth_n: int = 100,", "end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)]) df", "= df.rolling(smooth_n).max() elif smooth_f == 'median': df = df.rolling(smooth_n).median() else: raise NotImplementedError(f'... {smooth_f}", "raise NotImplementedError(f'... {smooth_f} not implemented ...') if dropna: df = df.dropna() return df", "numpy as np def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a", "info_prefix=''): \"\"\" Print a string to describe a df. \"\"\" info = info_prefix", "df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''): \"\"\" Print a string to describe a", "ts df. \"\"\" time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time']) data = pd.DataFrame(np.random.randn(len(time_range), n_cols),", "n_cols=5, smooth_n: int = 100, smooth_f: str = 'mean', dropna: bool = True):" ]
[ "is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def get_serializer_context(self):", "heritages.search import search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer", "ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class =", "rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import User from heritages.models import Heritage, Multimedia, Annotation", "None) if not keyword: return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"]", "class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer permission_classes = (IsSelf,)", "viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset =", "viewsets from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import User from heritages.models import Heritage,", "queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try:", "= AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet):", "not keyword: return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i", "if not keyword: 
return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for", "heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def", "raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer", "result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset =", "django.core.files.uploadedfile import UploadedFile from django.conf import settings from django.http import HttpResponse from rest_framework", "viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer permission_classes = (IsSelf,) def get_me(self, request):", "i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView):", "import Response from rest_framework.viewsets import ViewSet from rest_framework import mixins from rest_framework import", "return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"])", "perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound()", "serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id)", "def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file =", 
"result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset =", "import PermissionDenied from django.contrib.auth.models import User from heritages.models import Heritage, Multimedia, Annotation from", "PermissionDenied from django.contrib.auth.models import User from heritages.models import Heritage, Multimedia, Annotation from heritages.search", "Heritage, Multimedia, Annotation from heritages.search import search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer,", "def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword: return", "django.conf import settings from django.http import HttpResponse from rest_framework import generics from rest_framework.exceptions", "class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset =", "return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args,", "try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset", "serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return", "from django.core.files.uploadedfile import UploadedFile from django.conf import settings from django.http import HttpResponse from", "raise NotFound() return serializer.save(heritage=heritage) class 
MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer class", "*args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView):", "return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class =", "from rest_framework.viewsets import ViewSet from rest_framework import mixins from rest_framework import viewsets from", "queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]}", "HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request, *args, **kwargs): keyword", "**kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs) result", "return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all()", "keyword: return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in", "rest_framework import generics from rest_framework.exceptions import NotFound from rest_framework.response import Response from rest_framework.viewsets", "if not keyword: return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for", "return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = 
Heritage.objects.all() serializer_class =", "MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id):", "Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def", "HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class", "search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import", "except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class =", "import UploadedFile from django.conf import settings from django.http import HttpResponse from rest_framework import", "try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self, serializer):", "Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer", "= HeritageSerializer def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not", "i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all() def", "from rest_framework import viewsets from rest_framework.exceptions import PermissionDenied from 
django.contrib.auth.models import User from", "for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class", "self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView):", "import NotFound from rest_framework.response import Response from rest_framework.viewsets import ViewSet from rest_framework import", "return heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except", "result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class =", "try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file,", "else: return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request,", "Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer", "= MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return", "result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return", "Multimedia, Annotation from heritages.search import 
search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer,", "not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def get_serializer_context(self): return", "for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer class", "AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset", "= HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"])", "Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None)", "HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except", "get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None)", "rest_framework.viewsets import ViewSet from rest_framework import mixins from rest_framework import viewsets from rest_framework.exceptions", "UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset =", "*args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView):", "= 
self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else:", "return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"])", "queryset = User.objects.all() serializer_class = UserSerializer permission_classes = (IsSelf,) def get_me(self, request): return", "= MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except", "UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer permission_classes = (IsSelf,) def", "return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if", "AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not", "IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request,", "ViewSet from rest_framework import mixins from rest_framework import viewsets from rest_framework.exceptions import PermissionDenied", "m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\")", "MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist:", "= Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try:", 
"self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return", "= Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all()", "*args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs)", "django.contrib.auth.models import User from heritages.models import Heritage, Multimedia, Annotation from heritages.search import search_heritages,", "= self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class", "django.http import HttpResponse from rest_framework import generics from rest_framework.exceptions import NotFound from rest_framework.response", "from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import User from heritages.models import Heritage, Multimedia,", "except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try:", "keyword = self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs) result =", "serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try: heritage =", "= self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data", "self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword:", "class MultimediaView(generics.RetrieveDestroyAPIView): queryset = 
Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request,", "NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id)", "def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword", "content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all() heritage_id =", "@staticmethod def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file", "get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file)", "*args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView):", "Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset", "return queryset else: return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def", "for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all()", "AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class 
AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class", "def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise", "Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class", "= Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer", "heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage)", "HttpResponse from rest_framework import generics from rest_framework.exceptions import NotFound from rest_framework.response import Response", "file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self):", "Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class", "return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword", "= AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self,", "IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = 
HeritageSerializer def list(self,", "AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin,", "from django.contrib.auth.models import User from heritages.models import Heritage, Multimedia, Annotation from heritages.search import", "search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions", "User from heritages.models import Heritage, Multimedia, Annotation from heritages.search import search_heritages, search_annotations from", "NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs):", "ObjectDoesNotExist from django.core.files.uploadedfile import UploadedFile from django.conf import settings from django.http import HttpResponse", "from rest_framework.exceptions import NotFound from rest_framework.response import Response from rest_framework.viewsets import ViewSet from", "NotFound from rest_framework.response import Response from rest_framework.viewsets import ViewSet from rest_framework import mixins", "return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod", "return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self):", "list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request,", "AnnotationPaleSerializer def get_queryset(self): return 
Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request,", "Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class", "None) if not keyword: return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"]", "serializer_class = MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound()", "from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner,", "if heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound()", "rest_framework import viewsets from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import User from heritages.models", "import HttpResponse from rest_framework import generics from rest_framework.exceptions import NotFound from rest_framework.response import", "import settings from django.http import HttpResponse from rest_framework import generics from rest_framework.exceptions import", "NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def", "import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf", "from heritages.models import Heritage, Multimedia, Annotation from heritages.search import search_heritages, search_annotations from 
heritages.serializers", "super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class", "UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer permission_classes =", "= Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all()", "rest_framework.exceptions import NotFound from rest_framework.response import Response from rest_framework.viewsets import ViewSet from rest_framework", "= Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m", "Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return", "def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\",", "keyword: return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in", "serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer class", "def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def", "self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return", "Response from 
rest_framework.viewsets import ViewSet from rest_framework import mixins from rest_framework import viewsets", "raise NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage =", "**kwargs) result = Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset", "self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword:", "MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia", "heritages.models import Heritage, Multimedia, Annotation from heritages.search import search_heritages, search_annotations from heritages.serializers import", "Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword =", "= self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data", "from heritages.search import search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\", "from django.http import HttpResponse from rest_framework import generics from rest_framework.exceptions import NotFound from", "from rest_framework.response import Response from rest_framework.viewsets import ViewSet from rest_framework import mixins from", "self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args, **kwargs) result = Response(search_heritages(keyword)).data return", "import generics from rest_framework.exceptions import NotFound from rest_framework.response 
import Response from rest_framework.viewsets import", "= UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset", "get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self,", "= Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id =", "ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage", "{\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not", "\\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all()", "**kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset", "Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class", "AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset", "class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request, *args, **kwargs):", "import 
ViewSet from rest_framework import mixins from rest_framework import viewsets from rest_framework.exceptions import", "User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class =", "class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist:", "= User.objects.all() serializer_class = UserSerializer permission_classes = (IsSelf,) def get_me(self, request): return Response(self.serializer_class(request.user).data)", "heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous,", "import search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from", "IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request, *args,", "= Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\",", "Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer", "self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if", "AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() 
serializer_class", "request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword: return super().list(request, *args,", "generics from rest_framework.exceptions import NotFound from rest_framework.response import Response from rest_framework.viewsets import ViewSet", "NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet):", "not keyword: return super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i", "class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"]", "class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer", "= Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer", "def get_queryset(self): queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not None:", "import viewsets from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import User from heritages.models import", "class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise", "= Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class", "Annotation.objects.all() 
heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return", "AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all()", "serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def", "rest_framework.response import Response from rest_framework.viewsets import ViewSet from rest_framework import mixins from rest_framework", "MultimediaSerializer, AnnotationSerializer, UserSerializer, \\ AnnotationPaleSerializer from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView):", "import ObjectDoesNotExist from django.core.files.uploadedfile import UploadedFile from django.conf import settings from django.http import", "heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"] try: heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist:", "from django.core.exceptions import ObjectDoesNotExist from django.core.files.uploadedfile import UploadedFile from django.conf import settings from", "None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def get_serializer_context(self): return {\"target_id\":", "mixins from rest_framework import viewsets from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import User", "queryset = Heritage.objects.all() serializer_class = HeritageSerializer def list(self, request, *args, **kwargs): keyword =", "serializer_class = HeritageSerializer def list(self, request, *args, **kwargs): keyword = 
self.request.query_params.get(\"keyword\", None) if", "= Response(search_heritages(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all()", "queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class =", "from rest_framework import generics from rest_framework.exceptions import NotFound from rest_framework.response import Response from", "heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset", "serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin,", "get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args, **kwargs):", "UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset =", "in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset", "= UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer permission_classes", "MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise 
NotFound(multimedia_id)", "AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if", "result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset =", "mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer permission_classes = (IsSelf,) def get_me(self,", "queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset =", "= queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\":", "return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\",", "heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id", "Annotation from heritages.search import search_heritages, search_annotations from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer,", "heritage = Heritage.objects.get(pk=heritage_id) except ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset =", "except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class", "{\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": 
self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None)", "import mixins from rest_framework import viewsets from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models import", "result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class =", "i in result[\"hits\"][\"hits\"]) class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView):", "queryset = Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self):", "get_queryset(self): queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset", "def get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def list(self, request, *args,", "heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer", "ObjectDoesNotExist: raise NotFound() return serializer.save(heritage=heritage) class MultimediaView(generics.RetrieveDestroyAPIView): queryset = Multimedia.objects.all() serializer_class = MultimediaSerializer", "queryset = queryset.filter(target__target_id__contains=heritage_id) return queryset else: return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(),", "from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() 
serializer_class =", "HeritageSerializer def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not keyword:", "= User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class", "in result[\"hits\"][\"hits\"]) class HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class", "**kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class", "from django.conf import settings from django.http import HttpResponse from rest_framework import generics from", "MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try: heritage = Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise", "import IsOwner, IsNotAnonymous, IsSelf class HeritagesListView(generics.ListCreateAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer def", "rest_framework import mixins from rest_framework import viewsets from rest_framework.exceptions import PermissionDenied from django.contrib.auth.models", "queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all()", "\"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword = self.request.query_params.get(\"keyword\", None) if not", "in result[\"hits\"][\"hits\"]) class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return 
Annotation.objects.all() def get_serializer_context(self):", "queryset else: return NotFound() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self,", "from rest_framework import mixins from rest_framework import viewsets from rest_framework.exceptions import PermissionDenied from", "queryset = Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class", "super().list(request, *args, **kwargs) result = Response(search_annotations(keyword)).data return Response(i[\"_source\"] for i in result[\"hits\"][\"hits\"]) class", "settings from django.http import HttpResponse from rest_framework import generics from rest_framework.exceptions import NotFound", "Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def get_queryset(self): try: heritage", "= Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is not None: queryset = queryset.filter(target__target_id__contains=heritage_id)", "UploadedFile from django.conf import settings from django.http import HttpResponse from rest_framework import generics", "get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri(), \"heritage_id\": self.kwargs[\"heritage_id\"]} def list(self, request, *args, **kwargs): keyword =", "= AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin,", "serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer", 
"AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()}", "HeritageView(generics.RetrieveUpdateDestroyAPIView): queryset = Heritage.objects.all() serializer_class = HeritageSerializer class MultimediaListView(generics.ListCreateAPIView): serializer_class = MultimediaSerializer def", "serializer_class = AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id", "HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView): serializer_class = AnnotationSerializer def get_queryset(self): queryset = Annotation.objects.all() heritage_id", "import User from heritages.models import Heritage, Multimedia, Annotation from heritages.search import search_heritages, search_annotations", "multimedia_id): try: m = Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return", "= Annotation.objects.all() serializer_class = AnnotationPaleSerializer class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class =", "class AnnotationPaleListView(generics.ListCreateAPIView): serializer_class = AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\":", "django.core.exceptions import ObjectDoesNotExist from django.core.files.uploadedfile import UploadedFile from django.conf import settings from django.http", "Heritage.objects.get(pk=self.kwargs[\"heritage_id\"]) except ObjectDoesNotExist: raise NotFound() return heritage.multimedia def perform_create(self, serializer): heritage_id = self.kwargs[\"heritage_id\"]", "= AnnotationSerializer def get_queryset(self): queryset = 
Annotation.objects.all() heritage_id = self.kwargs[\"heritage_id\"] if heritage_id is", "Multimedia.objects.all() serializer_class = MultimediaSerializer class MultimediaFileView(ViewSet): @staticmethod def get_file(request, multimedia_id): try: m =", "serializer_class = AnnotationPaleSerializer def get_queryset(self): return Annotation.objects.all() def get_serializer_context(self): return {\"target_id\": self.request.build_absolute_uri()} def", "class AnnotationView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all() serializer_class = AnnotationSerializer class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView): queryset = Annotation.objects.all()", "class Users(mixins.CreateModelMixin, viewsets.GenericViewSet): queryset = User.objects.all() serializer_class = UserSerializer class UserDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet):", "Multimedia.objects.get(pk=multimedia_id) except ObjectDoesNotExist: raise NotFound(multimedia_id) file = UploadedFile(m.file) return HttpResponse(file, content_type=\"image/png\") class AnnotationListView(generics.ListCreateAPIView):", "import Heritage, Multimedia, Annotation from heritages.search import search_heritages, search_annotations from heritages.serializers import HeritageSerializer," ]
[]
[ "aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva = \"\" j = 0 while j <", "while j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j += 1 return", "self.cadena: while i < numFunciones: if len(linea) == 0: i += 1 break", "textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i = 0 self.textoCifrado = textoCifrado def", "= textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1", "self.series[i] while j < len(tmp): funciones.append(tmp[j]) j += 1 i += 1 j", "= list() i = 0 j = 0 while i < longSeries: tmp", "class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena = cadena self.series = series self.textoClaro", "TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena = cadena self.series = series self.textoClaro =", "self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1 if j < saltosLinea: textoCifrado += linea_a_cifrar", "if len(linea) == 0: i += 1 break if j < saltosLinea: linea_a_descifrar", "= 0 j = 0 for linea in self.cadena: while i < numFunciones:", "j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\"", "j = 0 for linea in self.cadena: if len(linea) == 0: i +=", "0 while i < longSeries: tmp = self.series[i] while j < len(tmp): funciones.append(tmp[j])", "= list() longSeries = len(self.series) tmp = list() i = 0 j =", "break if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar", "nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [], [], []", "# -*- coding: UTF-8 -*- class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena =", "break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, 
len(self.series[i])) i += 1 if j < saltosLinea:", "+= linea[ int(self.series[i][j])-1 ] j += 1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones,", "def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado = list() pos =", "= len(self.series) i = 0 j = 0 for linea in self.cadena: while", "funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones))", "linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado = list() pos = 0 i", "< lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor in", "= 0 while i < longSeries: tmp = self.series[i] while j < len(tmp):", "\"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i = 0", "longSeries = len(self.series) tmp = list() i = 0 j = 0 while", "textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i", "tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j += 1 return lineaNueva def aplicarSeries_descifrar(self,", "j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j += 1 return lineaNueva", "+= 1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado", "def descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones =", "\"\" j = 0 while j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ]", "[], [], [] ] funciones = list() longSeries = len(self.series) tmp = list()", "int(self.series[i][j])-1 ] j += 1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque", "i += 1 break if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones))", "len(funciones)) textoDescifrado += 
linea_a_descifrar linea_a_descifrar = \"\" i = 0 self.textoClaro = textoDescifrado", "python3 # -*- coding: UTF-8 -*- class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena", "linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar +=", "descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones()", "+= 1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado", "= 0 i = 0 while i < lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]})", "series, cadena=None): self.cadena = cadena self.series = series self.textoClaro = \"\" self.textoCifrado =", "linea_a_cifrar + \"\\n\" j += 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\"", "while i < numFunciones: if len(linea) == 0: i += 1 break linea_a_cifrar", "+= linea_a_cifrar linea_a_cifrar = \"\" i = 0 self.textoCifrado = textoCifrado def descifrar(self):", "return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado = list()", "self.concatenarFunciones() i = 0 j = 0 for linea in self.cadena: if len(linea)", "+= self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\" i = 0", "while i < longSeries: tmp = self.series[i] while j < len(tmp): funciones.append(tmp[j]) j", "textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva = \"\" j = 0 while", "len(tmp): funciones.append(tmp[j]) j += 1 i += 1 j = 0 return funciones", "textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado +=", "tmp = list() i = 0 j = 0 while i < longSeries:", "def cifrar(self): 
textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones =", "self.series = series self.textoClaro = \"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado =", "j = 0 while i < longSeries: tmp = self.series[i] while j <", "saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0 j = 0 for", "self.cadena: if len(linea) == 0: i += 1 break if j < saltosLinea:", "0 i = 0 while i < lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i", "cadena self.series = series self.textoClaro = \"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado", "= 0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\"", "tamanioFuncion): lineaNueva = \"\" j = 0 while j < tamanioFuncion: lineaNueva +=", "= 0 while j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j +=", "= \"\" i = 0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado = \"\"", "linea_a_cifrar = \"\" i = 0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado =", "len(self.cadena)-1 numFunciones = len(self.series) i = 0 j = 0 for linea in", "1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado =", "0: i += 1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1", "< numFunciones: if len(linea) == 0: i += 1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea,", "\"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\"", "= len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0 j = 0 for linea", "= textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva = \"\" j = 0", "linea[ int(self.series[i][j])-1 ] j += 1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones):", "i < 
lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor", "len(self.series[i])) i += 1 if j < saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\"", "linea_a_descifrar = \"\" i = 0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i,", "= {} bloqueDescifrado = list() pos = 0 i = 0 while i", "else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\" i", "lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor in nuevoBloque.items():", "for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self):", "llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[", "= 0 j = 0 for linea in self.cadena: if len(linea) == 0:", "if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar +", "nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado)", "1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def", "\"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea", "for linea in self.cadena: if len(linea) == 0: i += 1 break if", "0 while j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j += 1", "0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva = \"\" j", "j += 1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {}", "[], [] 
] funciones = list() longSeries = len(self.series) tmp = list() i", "i < longSeries: tmp = self.series[i] while j < len(tmp): funciones.append(tmp[j]) j +=", "= 0 while i < lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1", "lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado = list() pos", "aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque = {} bloqueDescifrado = list() pos = 0", "series self.textoClaro = \"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar", "+= 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i = 0 self.textoCifrado", "\"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0 j = 0", "len(self.series) tmp = list() i = 0 j = 0 while i <", "+= 1 if j < saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j +=", "int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado =", "self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\" i = 0 self.textoClaro", "0: i += 1 break if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones,", "{} bloqueDescifrado = list() pos = 0 i = 0 while i <", "1 else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i = 0 self.textoCifrado =", "lenFunciones): nuevoBloque = {} bloqueDescifrado = list() pos = 0 i = 0", "list() longSeries = len(self.series) tmp = list() i = 0 j = 0", "nuevoBloque = {} bloqueDescifrado = list() pos = 0 i = 0 while", "i += 1 if j < saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j", "pos = 0 i = 0 while i < lenFunciones: pos = int(funciones[i])-1", "bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [], [], [] ]", "\"\" i = 0 
self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva", "i = 0 j = 0 for linea in self.cadena: if len(linea) ==", "+ \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar =", "+ \"\\n\" j += 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i", "= \"\" i = 0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion):", "0 j = 0 for linea in self.cadena: if len(linea) == 0: i", "= len(self.cadena)-1 numFunciones = len(self.series) i = 0 j = 0 for linea", "saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar", "tmp = self.series[i] while j < len(tmp): funciones.append(tmp[j]) j += 1 i +=", "0 j = 0 for linea in self.cadena: while i < numFunciones: if", "saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j += 1 else: textoCifrado += linea_a_cifrar", "funciones = list() longSeries = len(self.series) tmp = list() i = 0 j", "= \"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i = 0 j =", "= self.series[i] while j < len(tmp): funciones.append(tmp[j]) j += 1 i += 1", "numFunciones: if len(linea) == 0: i += 1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i,", "linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i = 0 j", "coding: UTF-8 -*- class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena = cadena self.series", "+= self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea,", "def concatenarFunciones(self): #[ [], [], [] ] funciones = list() longSeries = len(self.series)", "i = 0 j = 0 while i < longSeries: tmp = self.series[i]", "+= 1 break linea_a_cifrar += 
self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1 if j", "textoCifrado += linea_a_cifrar + \"\\n\" j += 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar", "< saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j += 1 else: textoCifrado +=", "list() pos = 0 i = 0 while i < lenFunciones: pos =", "while i < lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave,", "funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\" i = 0 self.textoClaro =", "''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [], [], [] ] funciones = list()", "= \"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar = \"\"", "__init__(self, series, cadena=None): self.cadena = cadena self.series = series self.textoClaro = \"\" self.textoCifrado", "linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\" i =", "i = 0 j = 0 for linea in self.cadena: while i <", "else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i = 0 self.textoCifrado = textoCifrado", "= \"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i =", "= len(self.series) tmp = list() i = 0 j = 0 while i", "= cadena self.series = series self.textoClaro = \"\" self.textoCifrado = \"\" def cifrar(self):", "0 for linea in self.cadena: if len(linea) == 0: i += 1 break", "linea_a_descifrar linea_a_descifrar = \"\" i = 0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea,", "len(linea) == 0: i += 1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i", "len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0 j = 0 for linea in", "numFunciones = len(self.series) i = 0 j = 0 for linea in self.cadena:", "+= self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1 if j < saltosLinea: 
textoCifrado +=", "linea_a_cifrar linea_a_cifrar = \"\" i = 0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado", "return bloqueDescifrado def concatenarFunciones(self): #[ [], [], [] ] funciones = list() longSeries", "longSeries: tmp = self.series[i] while j < len(tmp): funciones.append(tmp[j]) j += 1 i", "<reponame>pordnajela/AlgoritmosCriptografiaClasica #!/usr/bin/env python3 # -*- coding: UTF-8 -*- class TransposicionSerie(object): def __init__(self, series,", "list() i = 0 j = 0 while i < longSeries: tmp =", "self.textoCifrado = \"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea =", "textoDescifrado += linea_a_descifrar linea_a_descifrar = \"\" i = 0 self.textoClaro = textoDescifrado def", "= \"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0 j =", "i < numFunciones: if len(linea) == 0: i += 1 break linea_a_cifrar +=", "len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado", "self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones,", "[] ] funciones = list() longSeries = len(self.series) tmp = list() i =", "= series self.textoClaro = \"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado = \"\"", "= int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado", "0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea", "lineaNueva = \"\" j = 0 while j < tamanioFuncion: lineaNueva += linea[", "i = 0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar =", "linea in self.cadena: if len(linea) == 0: i += 1 break if j", "funciones, lenFunciones): 
nuevoBloque = {} bloqueDescifrado = list() pos = 0 i =", "cadena=None): self.cadena = cadena self.series = series self.textoClaro = \"\" self.textoCifrado = \"\"", "linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1 if j < saltosLinea: textoCifrado", "] funciones = list() longSeries = len(self.series) tmp = list() i = 0", "0 while i < lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for", "i += 1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return", "= list() pos = 0 i = 0 while i < lenFunciones: pos", "0 for linea in self.cadena: while i < numFunciones: if len(linea) == 0:", "\"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0", "< saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar + \"\\n\" else:", "\"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones", "\"\" i = 0 self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar", "= \"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1", "\"\\n\" j += 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i =", "= 0 for linea in self.cadena: while i < numFunciones: if len(linea) ==", "len(linea) == 0: i += 1 break if j < saltosLinea: linea_a_descifrar +=", "i = 0 while i < lenFunciones: pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i +=", "concatenarFunciones(self): #[ [], [], [] ] funciones = list() longSeries = len(self.series) tmp", "-*- coding: UTF-8 -*- class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena = cadena", "1 break if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado +=", 
"#!/usr/bin/env python3 # -*- coding: UTF-8 -*- class TransposicionSerie(object): def __init__(self, series, cadena=None):", "1 if j < saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j += 1", "cifrar(self): textoCifrado = \"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series)", "+= linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar", "= \"\" j = 0 while j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1", "pos = int(funciones[i])-1 nuevoBloque.update({pos:linea[i]}) i += 1 for llave, valor in nuevoBloque.items(): bloqueDescifrado.append(valor)", "self.textoClaro = \"\" self.textoCifrado = \"\" def cifrar(self): textoCifrado = \"\" linea_a_cifrar =", "bloqueDescifrado = list() pos = 0 i = 0 while i < lenFunciones:", "j += 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar = \"\" i = 0", "if j < saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j += 1 else:", "while j < len(tmp): funciones.append(tmp[j]) j += 1 i += 1 j =", "self.cadena = cadena self.series = series self.textoClaro = \"\" self.textoCifrado = \"\" def", "valor in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [],", "bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [], [], [] ] funciones", "\"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i = 0 j = 0", "linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i = 0 j", "i = 0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva =", "linea, i, tamanioFuncion): lineaNueva = \"\" j = 0 while j < tamanioFuncion:", "i, tamanioFuncion): lineaNueva = \"\" j = 0 while j < tamanioFuncion: lineaNueva", "i, len(self.series[i])) i 
+= 1 if j < saltosLinea: textoCifrado += linea_a_cifrar +", "self.textoCifrado = textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea =", "+= linea_a_descifrar linea_a_descifrar = \"\" i = 0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self,", "-*- class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena = cadena self.series = series", "j < saltosLinea: textoCifrado += linea_a_cifrar + \"\\n\" j += 1 else: textoCifrado", "len(self.series) i = 0 j = 0 for linea in self.cadena: while i", "i += 1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1 if", "textoCifrado def descifrar(self): textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones", "1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i += 1 if j <", "in self.cadena: while i < numFunciones: if len(linea) == 0: i += 1", "j = 0 while j < tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j", "= 0 for linea in self.cadena: if len(linea) == 0: i += 1", "] j += 1 return lineaNueva def aplicarSeries_descifrar(self, linea, funciones, lenFunciones): nuevoBloque =", "funciones = self.concatenarFunciones() i = 0 j = 0 for linea in self.cadena:", "j < len(tmp): funciones.append(tmp[j]) j += 1 i += 1 j = 0", "== 0: i += 1 break if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea,", "= 0 self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva = \"\"", "= 0 j = 0 while i < longSeries: tmp = self.series[i] while", "for linea in self.cadena: while i < numFunciones: if len(linea) == 0: i", "< longSeries: tmp = self.series[i] while j < len(tmp): funciones.append(tmp[j]) j += 1", "self.textoClaro = textoDescifrado def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): lineaNueva = \"\" j =", "def aplicarSeries_cifrar(self, linea, i, tamanioFuncion): 
lineaNueva = \"\" j = 0 while j", "lineaNueva += linea[ int(self.series[i][j])-1 ] j += 1 return lineaNueva def aplicarSeries_descifrar(self, linea,", "in self.cadena: if len(linea) == 0: i += 1 break if j <", "#[ [], [], [] ] funciones = list() longSeries = len(self.series) tmp =", "j = 0 for linea in self.cadena: while i < numFunciones: if len(linea)", "= ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [], [], [] ] funciones =", "bloqueDescifrado def concatenarFunciones(self): #[ [], [], [] ] funciones = list() longSeries =", "< len(tmp): funciones.append(tmp[j]) j += 1 i += 1 j = 0 return", "textoDescifrado = \"\" linea_a_descifrar = \"\" saltosLinea = len(self.cadena)-1 funciones = self.concatenarFunciones() i", "linea in self.cadena: while i < numFunciones: if len(linea) == 0: i +=", "in nuevoBloque.items(): bloqueDescifrado.append(valor) bloqueDescifrado = ''.join(bloqueDescifrado) return bloqueDescifrado def concatenarFunciones(self): #[ [], [],", "== 0: i += 1 break linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i])) i +=", "0 j = 0 while i < longSeries: tmp = self.series[i] while j", "= \"\" linea_a_cifrar = \"\" saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i =", "saltosLinea = len(self.cadena)-1 numFunciones = len(self.series) i = 0 j = 0 for", "= self.concatenarFunciones() i = 0 j = 0 for linea in self.cadena: if", "UTF-8 -*- class TransposicionSerie(object): def __init__(self, series, cadena=None): self.cadena = cadena self.series =", "+= linea_a_cifrar + \"\\n\" j += 1 else: textoCifrado += linea_a_cifrar linea_a_cifrar =", "< tamanioFuncion: lineaNueva += linea[ int(self.series[i][j])-1 ] j += 1 return lineaNueva def", "linea_a_descifrar + \"\\n\" else: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado += linea_a_descifrar linea_a_descifrar", "if len(linea) == 0: i += 1 break linea_a_cifrar += 
self.aplicarSeries_cifrar(linea, i, len(self.series[i]))", "def __init__(self, series, cadena=None): self.cadena = cadena self.series = series self.textoClaro = \"\"", "+= 1 break if j < saltosLinea: linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones)) textoDescifrado" ]
[ "cv2 import numpy as np def globimgs(path, globs:list): \"\"\"returns a list of files", "alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img,", "with more than one extensions\"\"\" imgs = [] for i in globs: imgs.extend(glob.glob(path", "glob import cv2 import numpy as np def globimgs(path, globs:list): \"\"\"returns a list", "path with globing with more than one extensions\"\"\" imgs = [] for i", "cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img", "globs:list): \"\"\"returns a list of files with path with globing with more than", "import numpy as np def globimgs(path, globs:list): \"\"\"returns a list of files with", "import glob import cv2 import numpy as np def globimgs(path, globs:list): \"\"\"returns a", "\"/\")) return paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img =", "globs: imgs.extend(glob.glob(path + i)) paths = [] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\"))", "path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7,", "bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img, bg_img) norm_img = diff_img.copy()", "as np def globimgs(path, globs:list): \"\"\"returns a list of files with path with", "cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC)", "[] for i in globs: imgs.extend(glob.glob(path + i)) paths = [] for path", "i in globs: imgs.extend(glob.glob(path + i)) paths = [] for path in imgs:", "imgs.extend(glob.glob(path + i)) paths = [] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return", "15) diff_img = 255 - cv2.absdiff(img, bg_img) norm_img = 
diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0,", "- cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _,", "extensions\"\"\" imgs = [] for i in globs: imgs.extend(glob.glob(path + i)) paths =", "= diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230,", "imgs = [] for i in globs: imgs.extend(glob.glob(path + i)) paths = []", "dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,", "_, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)", "more than one extensions\"\"\" imgs = [] for i in globs: imgs.extend(glob.glob(path +", "numpy as np def globimgs(path, globs:list): \"\"\"returns a list of files with path", "one extensions\"\"\" imgs = [] for i in globs: imgs.extend(glob.glob(path + i)) paths", "7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img, bg_img) norm_img", "diff_img = 255 - cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255,", "= [] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img): dilated_img", "in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7),", "= cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img,", "def globimgs(path, globs:list): \"\"\"returns a list of files with path with globing with", "+ i)) paths = [] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths", "= [] for i in globs: imgs.extend(glob.glob(path + i)) 
paths = [] for", "in globs: imgs.extend(glob.glob(path + i)) paths = [] for path in imgs: paths.append(path.replace(\"\\\\\",", "\"\"\"returns a list of files with path with globing with more than one", "beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img, alpha=0,", "255 - cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)", "paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15)", "cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img,", "norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img, alpha=0, beta=255,", "globing with more than one extensions\"\"\" imgs = [] for i in globs:", "i)) paths = [] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def", "return paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img,", "import cv2 import numpy as np def globimgs(path, globs:list): \"\"\"returns a list of", "paths = [] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img):", "imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8))", "of files with path with globing with more than one extensions\"\"\" imgs =", "for i in globs: imgs.extend(glob.glob(path + i)) paths = [] for path in", "list of files with path with globing with more than one extensions\"\"\" imgs", "a list of files with path with globing with more than one extensions\"\"\"", "for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def 
scaneffects(img): dilated_img = cv2.dilate(img,", "dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255", "= cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255 -", "np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img, bg_img) norm_img =", "bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img =", "globimgs(path, globs:list): \"\"\"returns a list of files with path with globing with more", "with globing with more than one extensions\"\"\" imgs = [] for i in", "= 255 - cv2.absdiff(img, bg_img) norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,", "norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img,", "[] for path in imgs: paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img): dilated_img =", "def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img", "scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img =", "with path with globing with more than one extensions\"\"\" imgs = [] for", "thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) return", "= cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC) cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) return thr_img", "than one extensions\"\"\" imgs = [] for i in globs: imgs.extend(glob.glob(path + i))", "np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img, bg_img)", "np def 
globimgs(path, globs:list): \"\"\"returns a list of files with path with globing", "diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img, 230, 0,", "files with path with globing with more than one extensions\"\"\" imgs = []", "norm_img = diff_img.copy() cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) _, thr_img = cv2.threshold(norm_img,", "cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img = cv2.medianBlur(dilated_img, 15) diff_img = 255 - cv2.absdiff(img,", "paths.append(path.replace(\"\\\\\", \"/\")) return paths def scaneffects(img): dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8)) bg_img" ]
[ "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock):", "= mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value =", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"id\":", "dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model:", "with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "= Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock):", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "\"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock 
self.assertEqual( list([{\"title\": \"test\", \"id\": 12}]),", "test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with", "Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\")", "None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value =", "\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock", "12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_get_folder_by_id_no_id(self, call_the_api_mock):", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock):", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel =", "folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\")", "with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel", "def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel", "\"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel", "list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "= Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"}))", "= Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) 
@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock):", "mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self,", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception):", "= mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\")", "test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "\"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(", "\"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel =", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel", "test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, 
call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "\"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "def test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self,", "mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\":", "\"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "def test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder 
= Folder(grafana_api_model=model) mock:", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model:", "mock: Mock = Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}])", "Mock = Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) )", "self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel =", "None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\":", "folder: Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ):", ") with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", 
"call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel =", "with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(),", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\":", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}]))", "mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]),", "deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel =", "mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder", "10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel = 
APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self,", "= mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel", ") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\",", "all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) 
folder:", "self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel =", "Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\",", "self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\",", "patch from src.grafana_api.model import APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\":", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model:", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value =", "@patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\":", "Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception):", "\"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12,", "mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "12}), 
folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "src.grafana_api.model import APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock):", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self,", "= Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\":", "Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\",", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel", "model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\",", "folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "= list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}]", "12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel =", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel", "= Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model:", "\"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, 
call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\")", "\"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\",", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self,", "\"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "= Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None,", "12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), )", "= Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def", "dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel =", "12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel =", 
"mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\")", "\"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(),", "@patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock", "self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model:", "call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model:", "self.assertRaises(Exception): 
folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") )", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value =", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self):", "\"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self,", "self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock):", "call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel =", "model: APIModel = APIModel(host=MagicMock(), 
token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\")", "mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12))", "Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel", "= Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock):", "test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "= mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model:", "\"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"),", "Mock() mock.json = Mock(return_value=list([{\"title\": 
None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\":", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match(", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "def test_delete_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model:", "folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), 
folder.get_folder_permissions(\"test\"))", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}]))", "None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel", "call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel =", "folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value", "= mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "\"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_update_folder_no_uid(self, call_the_api_mock): model: APIModel", "test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock):", "folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model:", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\":", "12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock self.assertEqual( list([{\"title\": \"test\", \"id\": 12}]), folder.get_all_folder_ids_and_names()", "call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), )", "self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel =", "folder: Folder = Folder(grafana_api_model=model) mock: 
Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value", "\"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "= mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "= mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model:", "self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock):", "Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value =", 
"mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self,", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value =", "test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock( return_value=list([{\"title\": \"test\",", "12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\":", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\",", "overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model:", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: 
APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock):", "Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"}))", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value =", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\",", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value", "call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\")", "folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") 
def", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError):", "\"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model:", "folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value", "\"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\":", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with", "return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock self.assertEqual( list([{\"title\": \"test\",", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): 
folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock):", "def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "= Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\":", "12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "= mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self,", "Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\":", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock", "def 
test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\")", "@patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError):", "= Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def", "APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel =", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"}))", "= mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, 
call_the_api_mock): model: APIModel =", "Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel =", "dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model:", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model:", "Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\")", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model:", "Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\")", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock):", "12}), 
folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from src.grafana_api.model import", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel", "def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value", "= mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10)", "mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self,", "def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0,", "def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with 
self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self,", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\")", "def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\")", "mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception):", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json =", "= mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\":", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock 
self.assertEqual(None,", "def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "\"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self,", "\"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True),", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "= Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}),", "model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self,", "= mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with 
self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\")", "folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value", "with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", ") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", ") def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with", "call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel =", "self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), 
token=MagicMock()) folder: Folder", "= Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12}),", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\":", "mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock):", "= Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def", "= mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "Mock() mock.json = Mock(return_value=dict({\"title\": 
None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\":", "folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self,", "mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model:", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self,", "def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict())", "test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\")", "None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), 
folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "\"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), )", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock):", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10)", "= dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel =", "= Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") 
@patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel", "12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\")", "\"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12,", "= Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None,", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None,", "0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock())", "Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\"))", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_get_folder_by_id(self,", "\"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock):", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}))", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self,", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value", "\"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\")", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\":", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual(", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value =", "unittest.mock import MagicMock, Mock, patch from src.grafana_api.model import APIModel from src.grafana_api.folder import Folder", "Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( 
dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\",", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12,", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12}))", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel =", "None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model:", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}]))", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock):", "mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel =", "12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "\"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\",", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = 
mock with self.assertRaises(ValueError):", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\":", "def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "\"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "= mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"}))", "12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "def test_create_folder_error_response(self, call_the_api_mock): model: 
APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model:", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value =", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock", "Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self,", "test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", ") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), 
token=MagicMock()) folder: Folder", "call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model:", "= Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\":", "mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock):", "Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\",", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel =", "\"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel", "self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) 
folder: Folder", "import APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model:", "def test_get_folder_by_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\":", "Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model:", "MagicMock, Mock, patch from src.grafana_api.model import APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase):", "Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock()", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value", "Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), 
folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model:", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\",", "= Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\":", "= mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock):", "Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(),", "Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel", "\"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel =", "mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "mock: Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with 
self.assertRaises(Exception): folder.get_folders()", "= mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "= mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel", "= Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\")", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel", "Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "\"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( 
dict({\"title\":", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}),", "call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list()", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"}))", "Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None,", "self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model:", "self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value", "\"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), 
token=MagicMock()) folder:", "with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\":", "= Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock):", "call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel", "Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\")", "= Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None,", "\"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_title(self, call_the_api_mock): model: APIModel", "mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\":", "<filename>tests/unittests/test_folder.py from unittest import TestCase from unittest.mock import MagicMock, Mock, patch from src.grafana_api.model", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) 
with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def", "= Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "= list( [{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self,", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json", "self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12}))", "\"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\")", "def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) with 
self.assertRaises(ValueError):", "Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock self.assertEqual( list([{\"title\":", "\"test\": \"test\"}]) ) call_the_api_mock.return_value = mock self.assertEqual( list([{\"title\": \"test\", \"id\": 12}]), folder.get_all_folder_ids_and_names() )", "call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel =", "folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception):", "mock: Mock = Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]),", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value", "mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\":", "mock 
with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model:", "def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel =", "call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\")", "Mock, patch from src.grafana_api.model import APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\")", "call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel =", "= APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}])", "= mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with 
self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock):", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None,", "def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self,", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\":", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12}))", "Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual(", "= Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\")", "= Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None,", "dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict() with self.assertRaises(ValueError): 
folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\")", "self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model:", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\")", "mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", ") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "\"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel", "dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock):", "import TestCase from 
unittest.mock import MagicMock, Mock, patch from src.grafana_api.model import APIModel from", "from src.grafana_api.model import APIModel from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self,", "FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value", "folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value = mock", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "with self.assertRaises(Exception): 
folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(),", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None,", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self,", "with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value =", "dict() with self.assertRaises(ValueError): folder.update_folder_permissions(\"\", dict()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\",", "def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def 
test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock", "self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}), folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self,", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock):", "= Mock() mock.json = Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value", "unittest import TestCase from unittest.mock import MagicMock, Mock, patch from src.grafana_api.model import APIModel", "= mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value", "= Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}),", 
"self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\")", "from unittest.mock import MagicMock, Mock, patch from src.grafana_api.model import APIModel from src.grafana_api.folder import", "folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with", "folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel =", "test_delete_folder(self, call_the_api_mock): 
model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self,", "= mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value =", ") def test_get_folder_id_by_dashboard_path_general_path(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual(", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list())", "call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self,", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception):", "call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel", "= mock self.assertEqual(list([{\"id\": \"test\"}]), 
folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "from src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel =", "self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def", ") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel =", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "None, \"id\": 12}), folder.get_folder_by_id(12)) 
@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "\"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "= Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders())", "folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}] ) with", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\":", "Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model:", "folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): 
folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value =", "self.assertRaises(Exception): folder.update_folder(\"test\", \"test\", 10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\")", "with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) )", "call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = dict()", "Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model:", "mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = 
mock self.assertEqual( dict({\"title\": None, \"id\":", "[{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model:", "Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "folder.update_folder(\"test\", \"test1\", 10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value", "def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12,", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def 
test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "\"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\",", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict())", "None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "def test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "\"test1\", 10), ) 
@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value =", "= Mock() mock.json = Mock(return_value=dict({\"title\": \"test1\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\":", "list( [{\"title\": None, \"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock):", "None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "= Mock(return_value=dict({\"message\": \"Folder deleted\"})) call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock):", "test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\",", "test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel = 
APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list()) call_the_api_mock.return_value =", "mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock):", "folder.create_folder(\"test\", \"test\"), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) call_the_api_mock.return_value = mock", "= Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock", "Mock = Mock() mock.json = Mock(return_value=list([{\"title\": None, \"id\": 12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\":", "mock.json = Mock(return_value=dict({\"message\": \"test\"})) 
call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\")", "all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value =", "= Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", \"test\", overwrite=True), )", "12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock):", "test_get_folder_by_id_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "= Mock( return_value=list([{\"title\": \"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock self.assertEqual(", "Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") )", "\"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions(self, call_the_api_mock): model: APIModel", "folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): 
model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "\"test\", \"id\": 12, \"test\": \"test\"}]) ) call_the_api_mock.return_value = mock self.assertEqual( list([{\"title\": \"test\", \"id\":", "updated\"})) call_the_api_mock.return_value = mock self.assertEqual( None, folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_no_uid(self,", "Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions updated\"})) call_the_api_mock.return_value = mock self.assertEqual(", "mock with self.assertRaises(ValueError): folder.delete_folder(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock())", "= mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel", "def test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "\"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test1\", \"id\": 12}), folder.update_folder(\"test\", \"test1\", 10),", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def", "call_the_api_mock.return_value = mock self.assertEqual(None, folder.delete_folder(\"test\")) 
@patch(\"src.grafana_api.api.Api.call_the_api\") def test_delete_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_error_response(self,", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_id(10) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel", "test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "= mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel", "Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id(self, call_the_api_mock): model: APIModel", "mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel =", "mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception):", "test_create_folder(self, 
call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0)", "MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder deleted\"}))", "test_update_folder_no_title(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list( [{\"title\": None, \"id\": \"xty13y\"}] )", "Mock(return_value=dict({\"message\": \"test\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.update_folder_permissions(\"test\", dict({\"test\": \"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self,", "Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def", "= Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\")", 
"call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock =", "token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock( return_value=list([{\"title\":", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folder_by_uid(\"xty13y\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\": \"test\"}),", ") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "= Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.create_folder(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder(self, call_the_api_mock): model:", "self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\")", "call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_uid(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid_error_response(self, call_the_api_mock): model: APIModel =", "\"id\": 12, \"uid\": \"test\"})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12, \"uid\":", "): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) 
all_folder_ids_and_names_mock.return_value = list(", "10), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder", "with self.assertRaises(ValueError): folder.create_folder(MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "import MagicMock, Mock, patch from src.grafana_api.model import APIModel from src.grafana_api.folder import Folder class", "folder: Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"Folder permissions", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\":", "Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.get_folder_by_id(0) @patch(\"src.grafana_api.api.Api.call_the_api\")", "mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(), MagicMock()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_error_response(self,", "Folder = Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=list([{\"test\": \"test\"}])) 
call_the_api_mock.return_value =", "12}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"title\": None, \"id\": 12}]), folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock):", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value =", "self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_no_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.update_folder(MagicMock(),", "= Mock(return_value=list()) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.get_folders() @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_uid(self, call_the_api_mock): model:", "Mock = Mock() mock.json = Mock(return_value=dict({\"message\": \"error\"})) call_the_api_mock.return_value = mock with self.assertRaises(Exception): folder.delete_folder(\"test\")", "= list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_permissions_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "@patch(\"src.grafana_api.api.Api.call_the_api\") def test_create_folder(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "\"test\", \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": \"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True),", "with self.assertRaises(Exception): 
folder.get_folder_permissions(\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_permissions(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder:", "\"test\"})) @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "Folder(grafana_api_model=model) all_folder_ids_and_names_mock.return_value = list([{\"title\": \"test\", \"id\": 12}]) self.assertEqual( 12, folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") ) def test_get_folder_id_by_dashboard_path_general_path(self):", "test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model)", "\"id\": \"xty13y\"}] ) with self.assertRaises(Exception): folder.get_folder_id_by_dashboard_path(dashboard_path=\"test\") @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_all_folder_ids_and_names(self, call_the_api_mock): model: APIModel =", "= Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.create_folder(\"test\"))", "None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual( dict({\"title\": None, \"id\": 12}), folder.get_folder_by_uid(\"xty13y\") )", "Mock(return_value=dict({\"title\": None, \"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\")", "APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) self.assertEqual( 0, folder.get_folder_id_by_dashboard_path(dashboard_path=\"General\") ) def", 
"test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock: Mock", "\"id\": 12})) call_the_api_mock.return_value = mock self.assertEqual(dict({\"title\": None, \"id\": 12}), folder.get_folder_by_id(12)) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folder_by_id_no_id(self,", "Folder(grafana_api_model=model) mock: Mock = Mock() mock.json = Mock(return_value=dict({\"title\": \"test\", \"id\": 12})) call_the_api_mock.return_value =", "= Mock() mock.json = Mock(return_value=list([{\"id\": \"test\"}])) call_the_api_mock.return_value = mock self.assertEqual(list([{\"id\": \"test\"}]), folder.get_folder_permissions(\"test\")) @patch(\"src.grafana_api.api.Api.call_the_api\")", "folder.get_folders()) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders_error_response(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder =", "APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) call_the_api_mock.return_value = list() with self.assertRaises(ValueError): folder.get_folder_permissions(\"\") @patch(\"src.grafana_api.api.Api.call_the_api\")", "Folder = Folder(grafana_api_model=model) with self.assertRaises(ValueError): folder.get_folder_id_by_dashboard_path(dashboard_path=\"\") @patch(\"src.grafana_api.folder.Folder.get_all_folder_ids_and_names\") def test_get_folder_id_by_dashboard_path_no_title_match( self, all_folder_ids_and_names_mock ): model:", "mock: Mock = Mock() mock.json = Mock(return_value=dict()) call_the_api_mock.return_value = mock with self.assertRaises(ValueError): folder.create_folder(MagicMock())", "def test_create_folder_specified_uid(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(), token=MagicMock()) folder: Folder = Folder(grafana_api_model=model) mock:", "TestCase from unittest.mock import 
MagicMock, Mock, patch from src.grafana_api.model import APIModel from src.grafana_api.folder", "src.grafana_api.folder import Folder class FolderTestCase(TestCase): @patch(\"src.grafana_api.api.Api.call_the_api\") def test_get_folders(self, call_the_api_mock): model: APIModel = APIModel(host=MagicMock(),", "\"test\", \"id\": 12}), folder.update_folder(\"test\", overwrite=True), ) @patch(\"src.grafana_api.api.Api.call_the_api\") def test_update_folder_overwrite_true(self, call_the_api_mock): model: APIModel =" ]
[ "Created on 31 mar. 2020 @author: David ''' from sys import path path.append(\"/flash/userapp\")", ">= 1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found.", "again.\") except: print(\"I don't understand '{0}'. Please, try again.\".format(message)) class EchoConnection(Connection): async def", "esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1,", "#esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create a", "found. Please, try again.\") except: print(\"I don't understand '{0}'. Please, try again.\".format(message)) class", "on-board user leds if ledId >= 1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}']", "and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. Please, try", "toggled.\".format(ledId)) else: print(\"Led not found. 
Please, try again.\") except: print(\"I don't understand '{0}'.", "def onReceived(self, message): code = message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING)))", "try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally:", "get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async def", "sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self):", "async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message):", "message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception:", "''' Created on 31 mar. 
2020 @author: David ''' from sys import path", "Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on CN7-01 (PC6)", "uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection):", "echo = message.strip() if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class", "exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection)", "button to finish.\") esp = None # Uncomment ESP8266 configuration properly #esp =", "Switch() while not sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266", "115200, debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create a Esp8266 object first.\") loop", "Switch, Pin from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection,", "ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board user leds if ledId", "not found. Please, try again.\") except: print(\"I don't understand '{0}'. Please, try again.\".format(message))", "ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId))", "pyb import LED, Switch, Pin from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from", "3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. 
Please, try again.\") except: print(\"I", "print(\"*** Esp8266 communication test ***\") print(\"Press switch button to finish.\") esp = None", "ledId >= 1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not", "EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async def onReceived(self, message): echo = message.strip() if", "#NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6 is on", "get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\",", "as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw", "loop = get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP)", "print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"): try:", "print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The", "= message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex:", "ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication test ***\") print(\"Press switch", "message): code = message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception", "async def serve(esp): 
esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch() while not", "'{0}'. Please, try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async def onReceived(self,", "<= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. Please, try again.\") except:", "from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class", "#NUCLEO-F767ZI if not esp: raise Exception(\"Create a Esp8266 object first.\") loop = get_event_loop()", "\"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program", "self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message): code =", "on 31 mar. 
2020 @author: David ''' from sys import path path.append(\"/flash/userapp\") from", "stopped.\") def main(): print(\"*** Esp8266 communication test ***\") print(\"Press switch button to finish.\")", "\"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program finished\") if", "= Switch() while not sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main(): print(\"***", "#esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch() while not sw.value(): await ua_sleep_ms(200) esp.stopServer()", "board has 3 on-board user leds if ledId >= 1 and ledId <=", "(PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create", "# On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6 is on CN7-11", "if not esp: raise Exception(\"Create a Esp8266 object first.\") loop = get_event_loop() esp.start()", "LED, Switch, Pin from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import", "!= \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message):", "serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch() while not sw.value(): await", "(PC6) and RX6 is on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True)", "#esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup()", "print(\"Waiting for connections...\") sw = Switch() while not sw.value(): await ua_sleep_ms(200) esp.stopServer() 
print(\"Server", "CN7-01 (PC6) and RX6 is on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200,", "Nucleo-F767ZI board has 3 on-board user leds if ledId >= 1 and ledId", "onReceived(self, message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3", "onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message): code = message.strip() if code", "else: print(\"Led not found. Please, try again.\") except: print(\"I don't understand '{0}'. Please,", "sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication test ***\")", "LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self,", "if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def", "onReceived(self, message): code = message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except", "while not sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication", "Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\")", "def onConnected(self): print(\"Connected!\") async def onReceived(self, message): echo = message.strip() if echo !=", "= None # Uncomment ESP8266 configuration properly #esp = Esp8266(3, Pin.board.D3, 115200, debug=True)", "import Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed:", "1, Esp8266.SECURITY_OPEN) 
loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program finished\") if __name__ == \"__main__\": main()", "= get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\",", "3 on-board user leds if ledId >= 1 and ledId <= 3: LED(ledId).toggle()", "#esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program finished\")", "Please, try again.\") except: print(\"I don't understand '{0}'. Please, try again.\".format(message)) class EchoConnection(Connection):", "Pin from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266", "esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program finished\") if __name__", "a Esp8266 object first.\") loop = get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\",", "from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def", "{0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI", "has 3 on-board user leds if ledId >= 1 and ledId <= 3:", "def main(): print(\"*** Esp8266 communication test ***\") print(\"Press switch button to finish.\") esp", "None # Uncomment ESP8266 configuration properly #esp = Esp8266(3, 
Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG", "code = message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as", "understand '{0}'. Please, try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async def", "onReceived(self, message): echo = message.strip() if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self):", "NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6 is on CN7-11 (PC7) #esp", "Exception(\"Create a Esp8266 object first.\") loop = get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT)", "115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6", "if ledId >= 1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led", "leds if ledId >= 1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else:", "main(): print(\"*** Esp8266 communication test ***\") print(\"Press switch button to finish.\") esp =", "print(\"I don't understand '{0}'. 
Please, try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\")", "David ''' from sys import path path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin", "on CN7-01 (PC6) and RX6 is on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8,", "Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId))", "def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"): try: ledId =", "RemoteExecConnection(Connection): async def onReceived(self, message): code = message.strip() if code != \"\": try:", "def onReceived(self, message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has", "\"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program finished\") if __name__ == \"__main__\":", "print(\"Press switch button to finish.\") esp = None # Uncomment ESP8266 configuration properly", "message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board", "to finish.\") esp = None # Uncomment ESP8266 configuration properly #esp = Esp8266(3,", "print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message): code = message.strip() if code !=", "class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def", "message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board user leds", "if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board user", "sw = Switch() while not 
sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main():", "= Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on CN7-01", "TX6 is on CN7-01 (PC6) and RX6 is on CN7-11 (PC7) #esp =", "Uncomment ESP8266 configuration properly #esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On", "esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp))", "as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected:", "print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication test ***\") print(\"Press switch button to", "= int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board user leds if ledId >=", "#The Nucleo-F767ZI board has 3 on-board user leds if ledId >= 1 and", "Esp8266 object first.\") loop = get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\")", "user leds if ledId >= 1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId))", "def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if", "and RX6 is on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI", "'{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message): code = message.strip()", "Esp8266 communication test ***\") print(\"Press switch button to finish.\") esp = None #", "31 mar. 
2020 @author: David ''' from sys import path path.append(\"/flash/userapp\") from pyb", "Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create a Esp8266 object", "don't understand '{0}'. Please, try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async", "try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board user leds if", "''' from sys import path path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin from", "from sys import path path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin from uasyncio", "onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1])", "debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6 is", "object first.\") loop = get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\",", "path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin from uasyncio import get_event_loop, sleep_ms as", "switch button to finish.\") esp = None # Uncomment ESP8266 configuration properly #esp", "esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch() while not sw.value(): await ua_sleep_ms(200)", "again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async def onReceived(self, message): echo =", "{0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch() while", "path path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin from uasyncio import get_event_loop, sleep_ms", "not sw.value(): await ua_sleep_ms(200) 
esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication test", "test ***\") print(\"Press switch button to finish.\") esp = None # Uncomment ESP8266", "class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async def onReceived(self, message): echo = message.strip()", "esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication test ***\") print(\"Press switch button", "Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting", "\"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message): code", "int(message.split(\":\")[1]) #The Nucleo-F767ZI board has 3 on-board user leds if ledId >= 1", "ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw =", "#esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on", "code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async", "print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. 
Please, try again.\") except: print(\"I don't understand", "try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection)", "not esp: raise Exception(\"Create a Esp8266 object first.\") loop = get_event_loop() esp.start() assert", "#esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx()", "message): echo = message.strip() if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\")", "LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. Please, try again.\") except: print(\"I don't", "ESP8266 configuration properly #esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI", "except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for", "{0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"): try: ledId", "= message.strip() if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection):", "class RemoteExecConnection(Connection): async def onReceived(self, message): code = message.strip() if code != \"\":", "raise Exception(\"Create a Esp8266 object first.\") loop = get_event_loop() esp.start() assert esp.isPresent() try:", "try again.\") except: print(\"I don't understand '{0}'. 
Please, try again.\".format(message)) class EchoConnection(Connection): async", "except: print(\"I don't understand '{0}'. Please, try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self):", "is on CN7-01 (PC6) and RX6 is on CN7-11 (PC7) #esp = Esp8266(6,", "On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6 is on CN7-11 (PC7)", "1 and ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. Please,", "\"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp):", "message.strip() if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async", "print(\"Connected!\") async def onReceived(self, message): echo = message.strip() if echo != \"\": self.send(\"echo:", "is on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not", "import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async", "= Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create a Esp8266", "Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and", "async def onReceived(self, message): code = message.strip() if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code,", "first.\") loop = get_event_loop() esp.start() assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\")", "!= \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex)) async def", "async def onReceived(self, message): echo = message.strip() if echo != \"\": self.send(\"echo: 
'{0}'\\r\\n\".format(echo))", "onConnected(self): print(\"Connected!\") async def onReceived(self, message): echo = message.strip() if echo != \"\":", "@author: David ''' from sys import path path.append(\"/flash/userapp\") from pyb import LED, Switch,", "def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch() while not sw.value():", "esp: raise Exception(\"Create a Esp8266 object first.\") loop = get_event_loop() esp.start() assert esp.isPresent()", "async def onReceived(self, message): if message.startswith(\"LED\"): try: ledId = int(message.split(\":\")[1]) #The Nucleo-F767ZI board", "import LED, Switch, Pin from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms from uvacbot.io.esp8266", "communication test ***\") print(\"Press switch button to finish.\") esp = None # Uncomment", "CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp: raise", "2020 @author: David ''' from sys import path path.append(\"/flash/userapp\") from pyb import LED,", "for connections...\") sw = Switch() while not sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\")", "echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self,", "await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def main(): print(\"*** Esp8266 communication test ***\") print(\"Press", "sys import path path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin from uasyncio import", "mar. 
2020 @author: David ''' from sys import path path.append(\"/flash/userapp\") from pyb import", "async def onConnected(self): print(\"Connected!\") async def onReceived(self, message): echo = message.strip() if echo", "# Uncomment ESP8266 configuration properly #esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG #", "RX6 is on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if", "***\") print(\"Press switch button to finish.\") esp = None # Uncomment ESP8266 configuration", "ledId <= 3: LED(ledId).toggle() print(\"Led['{0}'] toggled.\".format(ledId)) else: print(\"Led not found. Please, try again.\")", "connections...\") sw = Switch() while not sw.value(): await ua_sleep_ms(200) esp.stopServer() print(\"Server stopped.\") def", "esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN) loop.run_until_complete(serve(esp)) finally: esp._flushRx() esp.cleanup() print(\"Program finished\") if __name__ ==", "configuration properly #esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6", "from pyb import LED, Switch, Pin from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms", "print(\"Led not found. Please, try again.\") except: print(\"I don't understand '{0}'. 
Please, try", "debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create a Esp8266 object first.\") loop =", "onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async def onReceived(self, message): if message.startswith(\"LED\"):", "on CN7-11 (PC7) #esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp:", "finish.\") esp = None # Uncomment ESP8266 configuration properly #esp = Esp8266(3, Pin.board.D3,", "if code != \"\": try: exec(\"{0}\\r\\n\".format(str(code, Esp8266.BYTES_ENCODING))) except Exception as ex: self.send(\"Exception: {0}\\r\\n\".format(ex))", "properly #esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG # On NUCLEO-F767ZI TX6 is", "def onReceived(self, message): echo = message.strip() if echo != \"\": self.send(\"echo: '{0}'\\r\\n\".format(echo)) def", "self.send(\"Exception: {0}\\r\\n\".format(ex)) async def serve(esp): esp.initServer(EchoConnection) #esp.initServer(LedToggleConnection) print(\"Waiting for connections...\") sw = Switch()", "assert esp.isPresent() try: #esp.setOperatingMode(Esp8266.OP_MODE_CLIENT) #esp.join(\"SSID\", \"PASSWD\") #esp.setStaIpAddress(\"192.168.1.200\", \"192.168.1.1\") esp.setOperatingMode(Esp8266.OP_MODE_AP) esp.setAccessPointConfig(\"ESP8266-AP\", \"\", 1, Esp8266.SECURITY_OPEN)", "def onClose(self): print(\"Closed.\") class RemoteExecConnection(Connection): async def onReceived(self, message): code = message.strip() if", "import path path.append(\"/flash/userapp\") from pyb import LED, Switch, Pin from uasyncio import get_event_loop,", "Please, try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): print(\"Connected!\") async def onReceived(self, message):", "esp = None # Uncomment ESP8266 configuration properly #esp = Esp8266(3, Pin.board.D3, 115200,", "try again.\".format(message)) class EchoConnection(Connection): async def onConnected(self): 
print(\"Connected!\") async def onReceived(self, message): echo", "uvacbot.io.esp8266 import Connection, Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self):", "Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI if not esp: raise Exception(\"Create a Esp8266 object first.\")", "Esp8266 class LedToggleConnection(Connection): async def onConnected(self): print(\"Connected: {0}\".format(self._clientId)) def onClose(self): print(\"Closed: {0}\".format(self._clientId)) async" ]
[]
[ "\"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for", "as proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass @nottest def", "with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f) self.proxies", "User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def teardown_class(cls):", "[] for i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\":", "@nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict']", "= { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from", "ps.append(p) p.start() for p in ps: p.join() if __name__==\"__main__\": import nose #nose.main() result", "TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def teardown_class(cls): pass def setup(self): import sys,", "p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for p in ps: p.join()", "User class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api", "nose.tools import nottest import sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack", "proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self):", "= 
logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys, os, json, exceptions sys.path.append(\"..\")", "args=(apikeys, client_args, )) ps.append(p) p.start() for p in ps: p.join() if __name__==\"__main__\": import", "test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for i,", "open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass", "teardown(self): pass @nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300,", "300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker", "import User class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args):", "from tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing as mp from", "= self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p =", "python # -*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s:", "import nottest import sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from", "tweetf0rm.proxies import proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users import User class Handler(object):", "requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils", "} call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies 
import proxy_checker proxy_list = proxy_checker(self.proxies['proxies'])", "import multiprocessing as mp from tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data, bucket=None,", "import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user in enumerate(self.config['apikeys']):", "= proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user]", "logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools", "mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for p in ps: p.join() if __name__==\"__main__\":", "self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def", "twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict']", "[Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def teardown_class(cls): pass def setup(self):", "client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self):", "sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing as mp", "bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()])", "def call_user_api(apikeys, client_args): user_api = 
User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def", "= logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import nottest", "pass @classmethod def teardown_class(cls): pass def setup(self): import sys, os, json #sys.path.append(\"..\") with", "os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config", "import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from", "self.config = json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self): apikeys", "json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = {", "= [] for i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = {", "-*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG)", "config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self):", "= self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest", "setup(self): import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as 
config_f, open(os.path.abspath('proxy.json'), 'rb')", "multiprocessing as mp from tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data, bucket=None, key=None):", "apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args)", "300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start()", "= json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self): apikeys =", "full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users import User", "logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import", "'rb') as proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass @nottest", "p.start() for p in ps: p.join() if __name__==\"__main__\": import nose #nose.main() result =", "for p in ps: p.join() if __name__==\"__main__\": import nose #nose.main() result = nose.run(TestTwitterRateLimit)", "client_args, )) ps.append(p) p.start() for p in ps: p.join() if __name__==\"__main__\": import nose", "= { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args,", "user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod", "self.proxies = json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self): apikeys 
= self.config['apikeys']['i0mf0rmer13'] client_args", "for i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300,", "key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class", "client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys,", "os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker import", "pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod", "<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__)", "import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as", "user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def teardown_class(cls): pass def", "coding: utf-8 -*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log =", "apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p", "class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api =", "logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") 
requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys,", "from tweetf0rm.proxies import proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users import User class", "client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass", "proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user in enumerate(self.config['apikeys']): apikeys =", "'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f)", "client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps =", "ps = [] for i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args =", "pass def setup(self): import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f,", "client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def teardown_class(cls): pass", "{'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list =", "pass @nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\":", "#!/usr/bin/env python # -*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG,", "json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config =", "#sys.path.append(\"..\") with 
open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f)", "class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def teardown_class(cls): pass def setup(self): import", "from tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass def", "# -*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')", "} logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for p in", "def teardown_class(cls): pass def setup(self): import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb')", "enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args)", "{ \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies", "@classmethod def teardown_class(cls): pass def setup(self): import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'),", "json.load(config_f) self.proxies = json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13']", "from tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user", "setup_class(cls): pass @classmethod def teardown_class(cls): pass def setup(self): import sys, os, json #sys.path.append(\"..\")", "nottest import sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies", 
"self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api,", "i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\":", "call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps", "in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] }", "mp from tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass", "\"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list", "{ \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, ))", "logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for p in ps:", "logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import", ")) ps.append(p) p.start() for p in ps: p.join() if __name__==\"__main__\": import nose #nose.main()", "proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data,", "as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f) self.proxies = json.load(proxy_f) def", 
"import sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies import", "def append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args)", "as mp from tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data)", "import full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users import", "%(message)s') requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys, os, json,", "teardown_class(cls): pass def setup(self): import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as", "tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users", "Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys,", "def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for", "tweetf0rm.twitterapi.users import User class Handler(object): def append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys,", "def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] }", "@classmethod def setup_class(cls): pass @classmethod def teardown_class(cls): pass def setup(self): import sys, os,", "test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys,", 
"proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user in enumerate(self.config['apikeys']): apikeys = self.config['apikeys'][twitter_user] client_args", "proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user in enumerate(self.config['apikeys']): apikeys", "-*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log", "= mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for p in ps: p.join() if", "json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing", "open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f: self.config = json.load(config_f) self.proxies =", "from nose.tools import nottest import sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import", "@nottest def test_rate_limit(self): from tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = []", "requests_log = logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys, os, json, exceptions", "= json.load(proxy_f) def teardown(self): pass @nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args =", "append(self,data, bucket=None, key=None): logger.info(data) pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176,", "= User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls): pass @classmethod def", "def setup_class(cls): pass @classmethod def teardown_class(cls): pass def setup(self): import sys, os, json", 
"logging.getLogger(\"requests\") requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys, os, json, exceptions sys.path.append(\"..\") from", "{'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p) p.start() for p", "\"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } call_user_api(apikeys, client_args) @nottest def test_rate_limit(self): from tweetf0rm.proxies import", "tweetf0rm.proxies import proxy_checker proxy_list = proxy_checker(self.proxies['proxies']) ps = [] for i, twitter_user in", "utf-8 -*- import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\")", "call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit: @classmethod def setup_class(cls):", "sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f:", "exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker import multiprocessing as", "\"timeout\": 300, \"proxies\": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict'] } logger.info(client_args) p = mp.Process(target=call_user_api, args=(apikeys, client_args, )) ps.append(p)", "def setup(self): import sys, os, json #sys.path.append(\"..\") with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'),", "import proxy_checker import multiprocessing as mp from tweetf0rm.twitterapi.users import User class Handler(object): def", "format='%(levelname)s: %(message)s') requests_log = logging.getLogger(\"requests\") 
requests_log.setLevel(logging.DEBUG) from nose.tools import nottest import sys, os,", "def teardown(self): pass @nottest def test_china_proxy(self): apikeys = self.config['apikeys']['i0mf0rmer13'] client_args = { \"timeout\":", "sys, os, json, exceptions sys.path.append(\"..\") from tweetf0rm.utils import full_stack from tweetf0rm.proxies import proxy_checker", "logger.info(data) pass def call_user_api(apikeys, client_args): user_api = User(apikeys=apikeys, client_args=client_args) user_api.find_all_friend_ids(53039176, [Handler()]) class TestTwitterRateLimit:" ]
[ "if Hub Promote is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link':", "------- str Return url for Pause Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status',", "settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED", "CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign page", "settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL", "Return url for Start Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid':", "local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return", "def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return", "local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register", "import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register =", "'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def 
call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign", "\"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True)", "Complete Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0],", "CallCampaign Call Campaign Returns ------- str Return url for Start Call Campaign page", "Campaign Returns ------- str Return url for Resume Call Campaign page \"\"\" return", "Organizing Hub Feature Parameters ---------- feature_id : int Organizing Hub Feature id Returns", "@register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'],", "hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else:", "show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link,", ") @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign page Parameters ----------", "# Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return {", "'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for Start", "str Return url for Resume Call Campaign page \"\"\" return reverse_lazy( 
'organizing-hub-call-campaign-status', kwargs={", "BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED", "def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True )", "Hydra Promote Link if Hub Promote is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED", "from django.urls import reverse_lazy from calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user from", "import settings from django.urls import reverse_lazy from calls.models import CallCampaignStatus from local_groups.models import", "not None and hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id)", "return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], }", "logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL", "url for Start Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid,", "return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context):", "ORGANIZING_HUB_CALL_MANAGE_URL = 
settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag", "'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL", "CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging logger =", "context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user)", "False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local groups template tags\"\"\" return find_local_group_by_user(context['request'].user)", "@register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub", "str Return url for Start Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={", "organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert", "'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign", "ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def 
organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def", "settings from django.urls import reverse_lazy from calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user", "Parameters ---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return url for", "Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } )", "context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has access to", "Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group is not None and hasattr(", "organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set to show\"\"\" return { 'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter(", "return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag", "\"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group is not None and hasattr( local_group, 'organizinghubaccess',", "---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Start", "template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context):", "organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL", 
"django.conf import settings from django.urls import reverse_lazy from calls.models import CallCampaignStatus from local_groups.models", "Resume Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns -------", "Promote Link if Hub Promote is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return", "return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group =", ": CallCampaign Call Campaign Returns ------- str Return url for Complete Call Campaign", "\"\"\"Show Hydra Promote Link if Hub Promote is not enabled\"\"\" show_promote_link = not", "'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email():", "import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL", "not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing Hub templates", "call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign page Parameters ---------- call_campaign : CallCampaign", "Return url for Pause Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid':", "page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag", "Promote is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request':", "has access to Organizing Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group is", 
"CallCampaign Call Campaign Returns ------- str Return url for Pause Call Campaign page", "bool Return True if user has access to Organizing Hub Feature \"\"\" local_group", "django.urls import reverse_lazy from calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models", "'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return False", "@register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link if Hub Promote is not", "find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user)", "Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group': group,", "coding: utf-8 -*- from __future__ import unicode_literals from django import template from django.conf", "Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], }", "= local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context):", "return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag", "return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign page Parameters", 
"ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing", "------- bool Return True if user has access to Organizing Hub Feature \"\"\"", "kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for", "templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group': group, 'request':", "for Resume Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id':", "---------- feature_id : int Organizing Hub Feature id Returns ------- bool Return True", "ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def", "Feature Parameters ---------- feature_id : int Organizing Hub Feature id Returns ------- bool", "for Start Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id':", "ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def", "@register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url():", "\"\"\" URL for Resume Call Campaign page Parameters ---------- call_campaign : 
CallCampaign Call", "page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html',", "from local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__)", "settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url():", "Start Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns -------", "takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group,", "else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local groups template tags\"\"\"", "group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url':", "Link if Hub Promote is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return {", "Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } )", "Hub Login Alert snippet set to show\"\"\" return { 'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter( show=True ).first(),", "ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def 
call_campaign_complete_url(call_campaign): \"\"\" URL", "page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag", "URL for Complete Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign", "\"\"\"TODO move to local groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation", "Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], }", "\"\"\" URL for Pause Call Campaign page Parameters ---------- call_campaign : CallCampaign Call", "None and hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return", "from django.conf import settings from django.urls import reverse_lazy from calls.models import CallCampaignStatus from", "call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Pause Call", "find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link':", "user has access to Organizing Hub Feature Parameters ---------- feature_id : int Organizing", "return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def", "Campaign Returns ------- str Return url for Pause Call Campaign page \"\"\" return", "{ 'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def", "{ 'group': 
group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if", "'request': context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL", "def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return", "Hub Promote is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link,", "local_group(context): \"\"\"TODO move to local groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub", "ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def", "import template from django.conf import settings from django.urls import reverse_lazy from calls.models import", "Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group is not None and hasattr( local_group,", "organizing_hub.models import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL =", "= ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request':", "kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for", "Organizing Hub Feature id Returns ------- bool Return True if user has access", 
"Feature id Returns ------- bool Return True if user has access to Organizing", "CallCampaign Call Campaign Returns ------- str Return url for Complete Call Campaign page", "= settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def", "call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign page Parameters ---------- call_campaign : CallCampaign", "str Return url for Complete Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={", ": CallCampaign Call Campaign Returns ------- str Return url for Pause Call Campaign", "feature_id : int Organizing Hub Feature id Returns ------- bool Return True if", "Parameters ---------- feature_id : int Organizing Hub Feature id Returns ------- bool Return", "ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def", "menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return {", "# Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link", "unicode_literals from django import template from django.conf import settings from django.urls import reverse_lazy", "= settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def 
call_campaign_complete_url(call_campaign):", "enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'], } #", "@register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign page Parameters ---------- call_campaign", "ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html',", "'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag def", "template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL", "@register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign page Parameters ---------- call_campaign", "Return url for Resume Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid':", "call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Start Call", "def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login", "'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL", "= settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = 
settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL =", "show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing", "def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url':", "ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL", "Returns ------- str Return url for Complete Call Campaign page \"\"\" return reverse_lazy(", "} ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign page Parameters", "for Complete Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns", "reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\"", "return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag", "takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set to show\"\"\" return", "\"\"\" URL for Start Call Campaign page Parameters ---------- call_campaign : CallCampaign Call", "access to Organizing Hub Feature \"\"\" local_group = 
find_local_group_by_user(context['request'].user) if local_group is not", "find_local_group_by_user(context['request'].user) if local_group is not None and hasattr( local_group, 'organizinghubaccess', ): access =", "@register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url():", "calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging", "Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return", "def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign page Parameters ---------- call_campaign :", "ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'],", "access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True) def", "call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign page Parameters ---------- call_campaign : CallCampaign", "find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\"", "'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" 
URL", ": int Organizing Hub Feature id Returns ------- bool Return True if user", "kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra", "reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context):", "has access to Organizing Hub Feature Parameters ---------- feature_id : int Organizing Hub", "Login Alert snippet set to show\"\"\" return { 'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter( show=True ).first(), 'request':", "@register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url():", "@register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True", "Campaign Returns ------- str Return url for Start Call Campaign page \"\"\" return", "find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register = template.Library()", "import reverse_lazy from calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models import", "OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL", "Resume Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': 
call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0],", "if user has access to Organizing Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if", "return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag(", "'request': context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group =", "CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link if Hub", "has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move", "@register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign page Parameters ---------- call_campaign", "access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local", "= settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL =", "reverse_lazy from calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert", "Return url for Complete Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid':", "'status_id': CallCampaignStatus.complete.value[0], } ) 
@register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign", "Call Campaign Returns ------- str Return url for Start Call Campaign page \"\"\"", "return { 'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check", "Call Campaign Returns ------- str Return url for Pause Call Campaign page \"\"\"", "= find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id):", "import CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import logging logger", "\"\"\"Organizing Hub Login Alert snippet set to show\"\"\" return { 'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter( show=True", "def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set to show\"\"\" return { 'organizing_hub_login_alert':", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django import template from", "Returns ------- str Return url for Resume Call Campaign page \"\"\" return reverse_lazy(", "takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link if Hub Promote is not enabled\"\"\"", "return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign):", "} ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign page Parameters", "Start Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0],", "return ORGANIZING_HUB_DASHBOARD_URL 
@register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet", "context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag", "url for Pause Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid,", "): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True)", "def group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True)", "URL for Start Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign", "from django import template from django.conf import settings from django.urls import reverse_lazy from", "Campaign Returns ------- str Return url for Complete Call Campaign page \"\"\" return", "def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return", "ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set", "feature_id): \"\"\" Check if user has access to Organizing Hub Feature Parameters ----------", "logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL", "local_group = find_local_group_by_user(context['request'].user) if local_group 
is not None and hasattr( local_group, 'organizinghubaccess', ):", "int Organizing Hub Feature id Returns ------- bool Return True if user has", "settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL", "@register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign page Parameters ---------- call_campaign", "ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL", "Hub Feature Parameters ---------- feature_id : int Organizing Hub Feature id Returns -------", "settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete", "CallCampaign Call Campaign Returns ------- str Return url for Resume Call Campaign page", "group = find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context,", "------- str Return url for Complete Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status',", "Pause Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0],", "bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign page", "return 
reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign):", "has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has access to Organizing Hub Feature Parameters", "settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\"", "return has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local groups", "True if user has access to Organizing Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user)", "tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group", "Returns ------- str Return url for Start Call Campaign page \"\"\" return reverse_lazy(", ") def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set to show\"\"\" return {", "ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html',", "to Organizing Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group is not None", "Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED", "django import template from django.conf import settings from django.urls import reverse_lazy from calls.models", "Returns 
------- bool Return True if user has access to Organizing Hub Feature", "Alert snippet set to show\"\"\" return { 'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter( show=True ).first(), 'request': context['request'],", "= template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL =", "= settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL =", ") @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign page Parameters ----------", "settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED", "'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has access", "@register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call", "template from django.conf import settings from django.urls import reverse_lazy from calls.models import CallCampaignStatus", "---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Pause", "{ 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag", "ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL 
ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return", "url for Resume Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid,", "return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign):", "group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has", "import logging logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL =", "call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign page Parameters ---------- call_campaign : CallCampaign", "= settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL", "for Complete Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id':", "show_promote_link, 'request': context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group", "has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local groups template", "utf-8 -*- from __future__ import unicode_literals from django import template from django.conf import", "def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign page Parameters ---------- call_campaign :", "\"\"\" return reverse_lazy( 
'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def", "page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return url", "\"\"\" URL for Complete Call Campaign page Parameters ---------- call_campaign : CallCampaign Call", "__future__ import unicode_literals from django import template from django.conf import settings from django.urls", "Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link =", "'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user", "for Start Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns", "group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url():", "Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return", "\"\"\" Check if user has access to Organizing Hub Feature Parameters ---------- feature_id", "Check if user has access to Organizing Hub Feature Parameters ---------- feature_id :", "kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for", "Complete Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns -------", ": CallCampaign Call Campaign Returns ------- str Return url for Resume Call Campaign", "= settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = 
settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL =", "is not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'],", "BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign page Parameters ----------", "import unicode_literals from django import template from django.conf import settings from django.urls import", "from organizing_hub.models import OrganizingHubLoginAlert import logging logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL", "is not None and hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access =", "CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign page", "ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL", "'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show", "settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL", "Call Campaign Returns ------- str Return url for Resume Call Campaign page \"\"\"", 
"@register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group':", "def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign page Parameters ---------- call_campaign :", "Pause Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns -------", "------- str Return url for Start Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status',", ": CallCampaign Call Campaign Returns ------- str Return url for Start Call Campaign", "group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'], } @register.simple_tag(takes_context=True) def", "for Resume Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns", "= logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL =", "to Organizing Hub Feature Parameters ---------- feature_id : int Organizing Hub Feature id", ") @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign page Parameters ----------", "organizing_hub_nav(context): group = find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL,", "if local_group is not None and hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess", "str Return url for Pause Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={", "user has access to Organizing Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group", "= 
settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED =", "ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED", "def events_nav(context): \"\"\"Show Hydra Promote Link if Hub Promote is not enabled\"\"\" show_promote_link", "URL for Resume Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign", "local groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True)", "'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context):", "\"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def", "for Pause Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns", "show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return", "if user has access to Organizing Hub Feature Parameters ---------- feature_id : int", "organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL", 
"'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link if", "= not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing Hub", "Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } )", "'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause", "call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call", "local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO", "settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag", "not enabled\"\"\" show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED return { 'show_promote_link': show_promote_link, 'request': context['request'], }", "id Returns ------- bool Return True if user has access to Organizing Hub", "call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.simple_tag def call_campaign_start_url(call_campaign): \"\"\" URL for Start Call", "@register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local groups template tags\"\"\" return find_local_group_by_user(context['request'].user) #", "} @register.simple_tag def organizing_docs_url(): 
return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def", "for Pause Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id':", "} # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django import template", "@register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set to", "url for Complete Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid,", "= access.has_feature_access_by_id(feature_id) return has_feature_access else: return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to", "logging logger = logging.getLogger(__name__) register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL", "and hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access = access.has_feature_access_by_id(feature_id) return has_feature_access", "} ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call Campaign page Parameters", "from calls.models import CallCampaignStatus from local_groups.models import find_local_group_by_user from organizing_hub.models import OrganizingHubLoginAlert import", "} @register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has access to Organizing", "ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': 
show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL", "snippet set to show\"\"\" return { 'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter( show=True ).first(), 'request': context['request'], }", "call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume Call", "'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def organizing_hub_login_alert(context): \"\"\"Organizing Hub Login Alert snippet set to show\"\"\"", "'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote", "takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group': group, 'request': context['request'], }", "access to Organizing Hub Feature Parameters ---------- feature_id : int Organizing Hub Feature", "settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL", "def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for Complete Call Campaign", "reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\"", "def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has access to Organizing Hub Feature", "from __future__ import unicode_literals from django import template from django.conf import settings 
from", "return { 'show_promote_link': show_promote_link, 'request': context['request'], } # Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True)", "def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return", "Organizing Hub Feature \"\"\" local_group = find_local_group_by_user(context['request'].user) if local_group is not None and", "---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Resume", "'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag", "= find_local_group_by_user(context['request'].user) show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED return { 'group': group, 'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL,", "CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\" URL for Pause Call Campaign page", "Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign Returns ------- str", "register = template.Library() BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL", "\"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag def", "move to local groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu", "reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': 
CallCampaignStatus.complete.value[0], } ) @register.simple_tag def call_campaign_pause_url(call_campaign): \"\"\"", "to local groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html',", "organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL @register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL", "@register.simple_tag(takes_context=True) def has_organizing_hub_feature_access(context, feature_id): \"\"\" Check if user has access to Organizing Hub", "organizing_docs_url(): return ORGANIZING_DOCS_URL @register.simple_tag def organizing_email(): return ORGANIZING_EMAIL @register.simple_tag def organizing_hub_call_callers_url(): return ORGANIZING_HUB_CALL_CALLERS_URL", "@register.simple_tag def organizing_hub_call_manage_url(): return ORGANIZING_HUB_CALL_MANAGE_URL @register.simple_tag def organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url():", "Returns ------- str Return url for Pause Call Campaign page \"\"\" return reverse_lazy(", "call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Resume Call", "call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], } ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link", "Organizing Hub templates @register.inclusion_tag('partials/group_link.html', takes_context=True) def group_link(context): group = find_local_group_by_user(context['request'].user) return { 'group':", "= settings.ORGANIZING_HUB_CALL_CALLERS_URL ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL ORGANIZING_HUB_DASHBOARD_URL = 
settings.ORGANIZING_HUB_DASHBOARD_URL ORGANIZING_HUB_PROMOTE_ENABLED =", "'organizing_guides_url': ORGANIZING_GUIDES_URL, 'organizing_docs_url': ORGANIZING_DOCS_URL, 'show_admins_link': show_admins_link, 'request': context['request'], } @register.simple_tag def organizing_docs_url(): return", "groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing Hub Navigation menu @register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True) def", "def call_campaign_start_url(call_campaign): \"\"\" URL for Start Call Campaign page Parameters ---------- call_campaign :", "} ) @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link if Hub Promote", "'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.paused.value[0], } ) @register.simple_tag def call_campaign_resume_url(call_campaign): \"\"\" URL for Resume", ") @register.inclusion_tag('partials/events_nav.html', takes_context=True) def events_nav(context): \"\"\"Show Hydra Promote Link if Hub Promote is", "------- str Return url for Resume Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status',", "URL for Pause Call Campaign page Parameters ---------- call_campaign : CallCampaign Call Campaign", "Call Campaign page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.in_progress.value[0], }", "events_nav(context): \"\"\"Show Hydra Promote Link if Hub Promote is not enabled\"\"\" show_promote_link =", "-*- from __future__ import unicode_literals from django import template from django.conf import settings", "= find_local_group_by_user(context['request'].user) if local_group is not None and hasattr( local_group, 'organizinghubaccess', ): access", "Call Campaign Returns ------- str Return url for Complete Call Campaign page \"\"\"", "def local_group(context): \"\"\"TODO move to local 
groups template tags\"\"\" return find_local_group_by_user(context['request'].user) # Organizing", "page \"\"\" return reverse_lazy( 'organizing-hub-call-campaign-status', kwargs={ 'uuid': call_campaign.uuid, 'status_id': CallCampaignStatus.complete.value[0], } ) @register.simple_tag", "local_group is not None and hasattr( local_group, 'organizinghubaccess', ): access = local_group.organizinghubaccess has_feature_access", "call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Complete Call", "return False @register.simple_tag(takes_context=True) def local_group(context): \"\"\"TODO move to local groups template tags\"\"\" return", "---------- call_campaign : CallCampaign Call Campaign Returns ------- str Return url for Complete", "organizing_hub_call_script_url(): return ORGANIZING_HUB_CALL_SCRIPT_URL @register.simple_tag def organizing_hub_dashboard_url(): return ORGANIZING_HUB_DASHBOARD_URL @register.inclusion_tag( 'organizing_hub/tags/organizing_hub_login_alert.html', takes_context=True ) def", "Hub Feature id Returns ------- bool Return True if user has access to", "ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED ORGANIZING_HUB_CALL_CALLERS_URL", "Return True if user has access to Organizing Hub Feature \"\"\" local_group =", "= settings.ORGANIZING_HUB_PROMOTE_ENABLED @register.simple_tag def bsd_create_account_url(): return BSD_CREATE_ACCOUNT_URL @register.simple_tag def call_campaign_complete_url(call_campaign): \"\"\" URL for" ]
[ "as np import pyfits from pylab import matplotlib import matplotlib.pyplot as plt from", "192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax,", "python import getopt, sys, os import numpy as np import pyfits from pylab", "from pylab import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from", "matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext =", "#fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname +", "y1, y2 = 80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins,", "interpolation=\"nearest\") #imshow((t - t.min())) ** .25, interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)]", "ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) ** .25, interpolation=\"nearest\") tt", "np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t -", "extent = [80., 100., 192. - 155., 192. - 135, ] im =", "192. - 135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False)", "y2 = 80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2,", "tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6", "100., 192. - 155., 192. 
- 135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\")", "= '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname + '.png'", "import getopt, sys, os import numpy as np import pyfits from pylab import", "192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3)", "#axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str) #plt.colorbar() #plt.xlabel('Right Ascension') #plt.ylabel('Declination')", "fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying '", "- t.min())) ** .25, interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)] = 0", "= fname + '.png' print('displaying ' + fname) title_str = fname.split(os.sep)[-1] t =", "= zoomed_inset_axes(ax, 2, loc=3) # zoom = 6 extent = [80., 100., 192.", "from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0]", "sys, os import numpy as np import pyfits from pylab import matplotlib import", "fname = fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying ' + fname) title_str", "extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) # zoom", "im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 = 80., 100., 135., 155.,", "] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1,", "import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import", "#imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) 
** .25, interpolation=\"nearest\") tt = t **", "x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str) #plt.colorbar() #plt.xlabel('Right Ascension')", "= t ** .25 tt[np.isnan(tt)] = 0 extent = [0., 192., 0., 192.]", "[5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) ** .25, interpolation=\"nearest\")", "- 155., 192. - 135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()])", "** .25 tt[np.isnan(tt)] = 0 extent = [0., 192., 0., 192.] ax.imshow(tt, extent=extent,", "from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext =", "= 0 extent = [0., 192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom =", "135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str)", "fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax =", "zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname =", "getopt, sys, os import numpy as np import pyfits from pylab import matplotlib", "import numpy as np import pyfits from pylab import matplotlib import matplotlib.pyplot as", "interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 = 80., 100., 135.,", "100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\")", "fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) ** .25, interpolation=\"nearest\") tt = t", "ax.imshow(tt, 
extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) #", "#imshow((t - t.min())) ** .25, interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)] =", "t ** .25 tt[np.isnan(tt)] = 0 extent = [0., 192., 0., 192.] ax.imshow(tt,", "x2, y1, y2 = 80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax,", "out_fname = fname + '.png' print('displaying ' + fname) title_str = fname.split(os.sep)[-1] t", "= fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t", "interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) # zoom =", "as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits'", "sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying ' + fname)", "fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying ' + fname) title_str = fname.split(os.sep)[-1]", "plt.yticks(visible=False) #x1, x2, y1, y2 = 80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1,", "zoomed_inset_axes(ax, 2, loc=3) # zoom = 6 extent = [80., 100., 192. 
-", "import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname =", "interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)] = 0 extent = [0., 192.,", "tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6 extent =", "numpy as np import pyfits from pylab import matplotlib import matplotlib.pyplot as plt", "= 80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4,", "mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1]", "mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname", "import pyfits from pylab import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import", "'/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying", "axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 = 80.,", "= axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 =", "zoom = 6 extent = [80., 100., 192. - 155., 192. - 135,", "+ fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax", "0., 192.] 
ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2,", "plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) ** .25,", "80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6 extent = [80.,", "pylab import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator", "extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 = 80., 100.,", "tt[np.isnan(tt)] = 0 extent = [0., 192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom", "= np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t", "fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t ,", "import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname", "fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min()))", "192. - 155., 192. 
- 135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(),", "pyfits from pylab import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes", "= plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) **", "+ '.png' print('displaying ' + fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig", "mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str) #plt.colorbar() #plt.xlabel('Right Ascension') #plt.ylabel('Declination') plt.show() fig.savefig(out_fname)", "np import pyfits from pylab import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator", "' + fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4])", "155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str) #plt.colorbar()", "6 extent = [80., 100., 192. - 155., 192. - 135, ] im", "axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6 extent = [80., 100.,", "plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 = 80., 100., 135., 155., #axins.set_xlim(x1, x2)", "extent = [0., 192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,]", "2, loc=3) # zoom = 6 extent = [80., 100., 192. - 155.,", ", interpolation=\"nearest\") #imshow((t - t.min())) ** .25, interpolation=\"nearest\") tt = t ** .25", "- 135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1,", "155., 192. - 135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False)", "= [80., 100., 192. - 155., 192. 
- 135, ] im = axins.imshow(tzoom,", "= fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying ' + fname) title_str =", "#axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str) #plt.colorbar() #plt.xlabel('Right", "80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\",", ".25 tt[np.isnan(tt)] = 0 extent = [0., 192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\")", "print('displaying ' + fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1,", "os import numpy as np import pyfits from pylab import matplotlib import matplotlib.pyplot", "tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2 = 80., 100., 135., 155., #axins.set_xlim(x1,", "[80., 100., 192. - 155., 192. - 135, ] im = axins.imshow(tzoom, extent=extent,", "fname + '.png' print('displaying ' + fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T)", ".25, interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)] = 0 extent = [0.,", "#x1, x2, y1, y2 = 80., 100., 135., 155., #axins.set_xlim(x1, x2) #axins.set_ylim(y1, y2)", "t.min())) ** .25, interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)] = 0 extent", "im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2, y1, y2", "t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111) #imshow(t , interpolation=\"nearest\")", "= tt[135:155, 80:100,] axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6 extent", "title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig = plt.figure(1, [5,4]) ax = fig.add_subplot(111)", "plt from mpl_toolkits.axes_grid.inset_locator import 
zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext", "** .25, interpolation=\"nearest\") tt = t ** .25 tt[np.isnan(tt)] = 0 extent =", "135, ] im = axins.imshow(tzoom, extent=extent, interpolation=\"nearest\") im.set_clim([tt.min(), tt.max()]) plt.xticks(visible=False) plt.yticks(visible=False) #x1, x2,", "0 extent = [0., 192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155,", "tt = t ** .25 tt[np.isnan(tt)] = 0 extent = [0., 192., 0.,", "= fig.add_subplot(111) #imshow(t , interpolation=\"nearest\") #imshow((t - t.min())) ** .25, interpolation=\"nearest\") tt =", "= sys.argv[1] fname = fname_ext.split('.')[0] out_fname = fname + '.png' print('displaying ' +", "#!/usr/bin/env python import getopt, sys, os import numpy as np import pyfits from", "= 6 extent = [80., 100., 192. - 155., 192. - 135, ]", "# zoom = 6 extent = [80., 100., 192. - 155., 192. -", "import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext", "= [0., 192., 0., 192.] ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins", "mpl_toolkits.axes_grid.inset_locator import mark_inset #fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits' fname_ext = sys.argv[1] fname = fname_ext.split('.')[0] out_fname", "'.png' print('displaying ' + fname) title_str = fname.split(os.sep)[-1] t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T) fig =", "matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid.inset_locator import mark_inset", "[0., 192., 0., 192.] 
ax.imshow(tt, extent=extent, interpolation=\"nearest\") tzoom = tt[135:155, 80:100,] axins =", "y2) mark_inset(ax, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\") #plt.title(title_str) #plt.colorbar() #plt.xlabel('Right Ascension') #plt.ylabel('Declination') plt.show()", "loc=3) # zoom = 6 extent = [80., 100., 192. - 155., 192." ]
[ "in candidate can speed up # the comparison for following tests if candidate", "reduce(op.mul, range(1, r+1), 1) return numer // denom # or / in Python", "card curr_count = 1 else: curr_count += 1 if (cards_dict[curr_card] < curr_count): return", "test different targets against the same candidate. # So the cached counts of", "target[1:]: if (card != curr_card): if (cards_dict[curr_card] < curr_count): return False curr_card =", "= collections.defaultdict(int) for card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict", "_local_objs = LocalObjs() def contains_cards(candidate, target): ''' Check if cards of candidate contains", "_local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for card in candidate: cards_dict[card] += 1", "return numer // denom # or / in Python 2 def init_standard_deck(): '''", "list of Card object ''' res = [i for i in range(52)] return", "= LocalObjs() def contains_cards(candidate, target): ''' Check if cards of candidate contains cards", "-1), 1) denom = reduce(op.mul, range(1, r+1), 1) return numer // denom #", "!= candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for card in candidate: cards_dict[card]", "function # will test different targets against the same candidate. # So the", "== ''): return True curr_card = target[0] curr_count = 1 for card in", "import itertools import numpy as np import operator as op from functools import", "op from functools import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r):", "contains_cards(candidate, target): ''' Check if cards of candidate contains cards of target. 
Args:", "itertools import numpy as np import operator as op from functools import reduce", "// denom # or / in Python 2 def init_standard_deck(): ''' Initialize a", "self.cached_candidate_cards = None _local_objs = LocalObjs() def contains_cards(candidate, target): ''' Check if cards", "<gh_stars>0 import math from typing import List import threading import collections import itertools", "tests if candidate keeps the same. if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate:", "candidate keeps the same. if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards =", "curr_count): return False curr_card = card curr_count = 1 else: curr_count += 1", "plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1", "dtype=np.int8) one_hot[num_left_cards - 1] = 1 return one_hot def encode_players_round_active(players, num_players = 4)", "return one_hot def encode_players_round_active(players, num_players = 4) -> np.ndarray: plane = np.zeros(num_players, dtype=int)", "cards_dict = collections.defaultdict(int) for card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict", "= cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''): return True curr_card =", "cards of candidate target (string): A string representing the number of cards of", "cached counts of each card in candidate can speed up # the comparison", "i in range(52)] return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs", "Python 2 def init_standard_deck(): ''' Initialize a standard deck of 52 cards Returns:", "52 cards Returns: (list): A list of Card object ''' res = [i", "= reduce(op.mul, range(1, r+1), 1) return numer // denom # or / in", "# will test different targets against the same candidate. 
# So the cached", "card in candidate can speed up # the comparison for following tests if", "= 1 for card in target[1:]: if (card != curr_card): if (cards_dict[curr_card] <", "return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs() def", "True def encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int) for card_id in", "import math from typing import List import threading import collections import itertools import", "r): r = min(r, n-r) numer = reduce(op.mul, range(n, n-r, -1), 1) denom", "numer = reduce(op.mul, range(n, n-r, -1), 1) denom = reduce(op.mul, range(1, r+1), 1)", "the number of cards of target Returns: boolean ''' # In normal cases,", "def __init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs() def contains_cards(candidate, target): ''' Check", "string representing the number of cards of target Returns: boolean ''' # In", "encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int) for card_id in cards: plane[card_id]", "curr_count): return False return True def encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52,", "get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1 return one_hot", "= 1 return one_hot def encode_players_round_active(players, num_players = 4) -> np.ndarray: plane =", "against the same candidate. # So the cached counts of each card in", "np import operator as op from functools import reduce def nPr(n, r): return", "r+1), 1) return numer // denom # or / in Python 2 def", "in Python 2 def init_standard_deck(): ''' Initialize a standard deck of 52 cards", "same candidate. # So the cached counts of each card in candidate can", "targets against the same candidate. 
# So the cached counts of each card", "candidate cards_dict = collections.defaultdict(int) for card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict =", "as np import operator as op from functools import reduce def nPr(n, r):", "different targets against the same candidate. # So the cached counts of each", "1 return one_hot def encode_players_round_active(players, num_players = 4) -> np.ndarray: plane = np.zeros(num_players,", "1) denom = reduce(op.mul, range(1, r+1), 1) return numer // denom # or", "in cards: plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards,", "def init_standard_deck(): ''' Initialize a standard deck of 52 cards Returns: (list): A", "for following tests if candidate keeps the same. if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards", "1 for card in target[1:]: if (card != curr_card): if (cards_dict[curr_card] < curr_count):", "= _local_objs.cached_candidate_cards_dict if (target == ''): return True curr_card = target[0] curr_count =", "return True curr_card = target[0] curr_count = 1 for card in target[1:]: if", "(string): A string representing the number of cards of target Returns: boolean '''", "the same candidate. 
# So the cached counts of each card in candidate", "_local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for card", "return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] =", "or / in Python 2 def init_standard_deck(): ''' Initialize a standard deck of", "_local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for card in candidate:", "one_hot[num_left_cards - 1] = 1 return one_hot def encode_players_round_active(players, num_players = 4) ->", "np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id in players: plane[player_id] = 1 return", "''): return True curr_card = target[0] curr_count = 1 for card in target[1:]:", "import List import threading import collections import itertools import numpy as np import", "of target. Args: candidate (string): A string representing the cards of candidate target", "return True def encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int) for card_id", "comparison for following tests if candidate keeps the same. if not _local_objs.cached_candidate_cards or", "contains cards of target. 
Args: candidate (string): A string representing the cards of", "-> np.ndarray: plane = np.zeros(52, dtype=int) for card_id in cards: plane[card_id] = 1", "plane = np.zeros(52, dtype=int) for card_id in cards: plane[card_id] = 1 return plane", "- 1] = 1 return one_hot def encode_players_round_active(players, num_players = 4) -> np.ndarray:", "can speed up # the comparison for following tests if candidate keeps the", "representing the number of cards of target Returns: boolean ''' # In normal", "class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs() def contains_cards(candidate, target):", "False curr_card = card curr_count = 1 else: curr_count += 1 if (cards_dict[curr_card]", "range(1, r+1), 1) return numer // denom # or / in Python 2", "4) -> np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id in players: plane[player_id] =", "Returns: (list): A list of Card object ''' res = [i for i", "for card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict", "cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''): return True curr_card = target[0]", "threading import collections import itertools import numpy as np import operator as op", "functools import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r =", "return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r, n-r) numer = reduce(op.mul, range(n,", "operator as op from functools import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def", "standard deck of 52 cards Returns: (list): A list of Card object '''", "/ in Python 2 def init_standard_deck(): ''' Initialize a standard deck of 52", "a standard deck of 52 cards Returns: (list): A list of Card object", "nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r, n-r) numer =", "of 
target Returns: boolean ''' # In normal cases, most continuous calls of", "this function # will test different targets against the same candidate. # So", "1 else: curr_count += 1 if (cards_dict[curr_card] < curr_count): return False return True", "= 1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards -", "if (cards_dict[curr_card] < curr_count): return False return True def encode_cards(cards: List[int]) -> np.ndarray:", "= np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1 return one_hot def encode_players_round_active(players, num_players", "card in target[1:]: if (card != curr_card): if (cards_dict[curr_card] < curr_count): return False", "False return True def encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int) for", "+= 1 if (cards_dict[curr_card] < curr_count): return False return True def encode_cards(cards: List[int])", "< curr_count): return False return True def encode_cards(cards: List[int]) -> np.ndarray: plane =", "for i in range(52)] return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None", "__init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs() def contains_cards(candidate, target): ''' Check if", "speed up # the comparison for following tests if candidate keeps the same.", "True curr_card = target[0] curr_count = 1 for card in target[1:]: if (card", "1] = 1 return one_hot def encode_players_round_active(players, num_players = 4) -> np.ndarray: plane", "num_players = 4) -> np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id in players:", "cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''):", "reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r, n-r)", "def nCr(n, r): r = min(r, n-r) numer = 
reduce(op.mul, range(n, n-r, -1),", "range(52)] return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs()", "in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target", "target Returns: boolean ''' # In normal cases, most continuous calls of this", "range(n, n-r, -1), 1) denom = reduce(op.mul, range(1, r+1), 1) return numer //", "np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1 return one_hot def encode_players_round_active(players, num_players =", "collections import itertools import numpy as np import operator as op from functools", "number of cards of target Returns: boolean ''' # In normal cases, most", "Returns: boolean ''' # In normal cases, most continuous calls of this function", "import numpy as np import operator as op from functools import reduce def", "for card in target[1:]: if (card != curr_card): if (cards_dict[curr_card] < curr_count): return", "typing import List import threading import collections import itertools import numpy as np", "''' Check if cards of candidate contains cards of target. 
Args: candidate (string):", "import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r,", "Args: candidate (string): A string representing the cards of candidate target (string): A", "1) return numer // denom # or / in Python 2 def init_standard_deck():", "each card in candidate can speed up # the comparison for following tests", "(cards_dict[curr_card] < curr_count): return False curr_card = card curr_count = 1 else: curr_count", "= 1 else: curr_count += 1 if (cards_dict[curr_card] < curr_count): return False return", "None _local_objs = LocalObjs() def contains_cards(candidate, target): ''' Check if cards of candidate", "A string representing the cards of candidate target (string): A string representing the", "return False curr_card = card curr_count = 1 else: curr_count += 1 if", "if candidate keeps the same. if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards", "numpy as np import operator as op from functools import reduce def nPr(n,", "= candidate cards_dict = collections.defaultdict(int) for card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict", "in target[1:]: if (card != curr_card): if (cards_dict[curr_card] < curr_count): return False curr_card", "candidate target (string): A string representing the number of cards of target Returns:", "most continuous calls of this function # will test different targets against the", "curr_card): if (cards_dict[curr_card] < curr_count): return False curr_card = card curr_count = 1", "curr_count += 1 if (cards_dict[curr_card] < curr_count): return False return True def encode_cards(cards:", "the same. 
if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict", "of each card in candidate can speed up # the comparison for following", "2 def init_standard_deck(): ''' Initialize a standard deck of 52 cards Returns: (list):", "cards of target Returns: boolean ''' # In normal cases, most continuous calls", "So the cached counts of each card in candidate can speed up #", "# the comparison for following tests if candidate keeps the same. if not", "(target == ''): return True curr_card = target[0] curr_count = 1 for card", "if (card != curr_card): if (cards_dict[curr_card] < curr_count): return False curr_card = card", "''' res = [i for i in range(52)] return res class LocalObjs(threading.local): def", "target. Args: candidate (string): A string representing the cards of candidate target (string):", "_local_objs.cached_candidate_cards_dict if (target == ''): return True curr_card = target[0] curr_count = 1", "def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1 return", "cards of target. Args: candidate (string): A string representing the cards of candidate", "encode_players_round_active(players, num_players = 4) -> np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id in", "if (target == ''): return True curr_card = target[0] curr_count = 1 for", "numer // denom # or / in Python 2 def init_standard_deck(): ''' Initialize", "= target[0] curr_count = 1 for card in target[1:]: if (card != curr_card):", "string representing the cards of candidate target (string): A string representing the number", "will test different targets against the same candidate. # So the cached counts", "following tests if candidate keeps the same. 
if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards !=", "math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r, n-r) numer = reduce(op.mul, range(n, n-r,", "1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''): return True", "dtype=int) for card_id in cards: plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards, max_num_cards):", "= None _local_objs = LocalObjs() def contains_cards(candidate, target): ''' Check if cards of", "candidate contains cards of target. Args: candidate (string): A string representing the cards", "# In normal cases, most continuous calls of this function # will test", "the cached counts of each card in candidate can speed up # the", "r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r, n-r) numer = reduce(op.mul,", "curr_count = 1 else: curr_count += 1 if (cards_dict[curr_card] < curr_count): return False", "= np.zeros(52, dtype=int) for card_id in cards: plane[card_id] = 1 return plane def", "Initialize a standard deck of 52 cards Returns: (list): A list of Card", "(list): A list of Card object ''' res = [i for i in", "denom = reduce(op.mul, range(1, r+1), 1) return numer // denom # or /", "curr_count = 1 for card in target[1:]: if (card != curr_card): if (cards_dict[curr_card]", "boolean ''' # In normal cases, most continuous calls of this function #", "= 4) -> np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id in players: plane[player_id]", "cards: plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8)", "if cards of candidate contains cards of target. Args: candidate (string): A string", "-> np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id in players: plane[player_id] = 1", "of candidate contains cards of target. 
Args: candidate (string): A string representing the", "r = min(r, n-r) numer = reduce(op.mul, range(n, n-r, -1), 1) denom =", "# or / in Python 2 def init_standard_deck(): ''' Initialize a standard deck", "deck of 52 cards Returns: (list): A list of Card object ''' res", "In normal cases, most continuous calls of this function # will test different", "normal cases, most continuous calls of this function # will test different targets", "not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for", "candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target ==", "counts of each card in candidate can speed up # the comparison for", "_local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''): return True curr_card", "(card != curr_card): if (cards_dict[curr_card] < curr_count): return False curr_card = card curr_count", "else: curr_count += 1 if (cards_dict[curr_card] < curr_count): return False return True def", "target[0] curr_count = 1 for card in target[1:]: if (card != curr_card): if", "# So the cached counts of each card in candidate can speed up", "def encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int) for card_id in cards:", "or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for card in", "in range(52)] return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs =", "collections.defaultdict(int) for card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict =", "import collections import itertools import numpy as np import operator as op from", "(cards_dict[curr_card] < 
curr_count): return False return True def encode_cards(cards: List[int]) -> np.ndarray: plane", "keeps the same. if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate", "cards Returns: (list): A list of Card object ''' res = [i for", "A string representing the number of cards of target Returns: boolean ''' #", "plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards", "cards of candidate contains cards of target. Args: candidate (string): A string representing", "of cards of target Returns: boolean ''' # In normal cases, most continuous", "n-r) numer = reduce(op.mul, range(n, n-r, -1), 1) denom = reduce(op.mul, range(1, r+1),", "!= curr_card): if (cards_dict[curr_card] < curr_count): return False curr_card = card curr_count =", "one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1 return one_hot def encode_players_round_active(players,", "''' Initialize a standard deck of 52 cards Returns: (list): A list of", "candidate (string): A string representing the cards of candidate target (string): A string", "(string): A string representing the cards of candidate target (string): A string representing", "''' # In normal cases, most continuous calls of this function # will", "same. 
if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict =", "candidate can speed up # the comparison for following tests if candidate keeps", "List import threading import collections import itertools import numpy as np import operator", "def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r = min(r, n-r) numer", "import operator as op from functools import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r))", "LocalObjs() def contains_cards(candidate, target): ''' Check if cards of candidate contains cards of", "of 52 cards Returns: (list): A list of Card object ''' res =", "of this function # will test different targets against the same candidate. #", "up # the comparison for following tests if candidate keeps the same. if", "from functools import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n, r): r", "denom # or / in Python 2 def init_standard_deck(): ''' Initialize a standard", "+= 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''): return", "curr_card = target[0] curr_count = 1 for card in target[1:]: if (card !=", "np.zeros(52, dtype=int) for card_id in cards: plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards,", "return False return True def encode_cards(cards: List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int)", "= [i for i in range(52)] return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards", "card_id in cards: plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot =", "one_hot def encode_players_round_active(players, num_players = 4) -> np.ndarray: plane = np.zeros(num_players, dtype=int) for", "res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs() 
def contains_cards(candidate,", "A list of Card object ''' res = [i for i in range(52)]", "init_standard_deck(): ''' Initialize a standard deck of 52 cards Returns: (list): A list", "for card_id in cards: plane[card_id] = 1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot", "Card object ''' res = [i for i in range(52)] return res class", "card in candidate: cards_dict[card] += 1 _local_objs.cached_candidate_cards_dict = cards_dict cards_dict = _local_objs.cached_candidate_cards_dict if", "def encode_players_round_active(players, num_players = 4) -> np.ndarray: plane = np.zeros(num_players, dtype=int) for player_id", "= reduce(op.mul, range(n, n-r, -1), 1) denom = reduce(op.mul, range(1, r+1), 1) return", "if (cards_dict[curr_card] < curr_count): return False curr_card = card curr_count = 1 else:", "cases, most continuous calls of this function # will test different targets against", "representing the cards of candidate target (string): A string representing the number of", "if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int)", "cards_dict = _local_objs.cached_candidate_cards_dict if (target == ''): return True curr_card = target[0] curr_count", "the cards of candidate target (string): A string representing the number of cards", "Check if cards of candidate contains cards of target. 
Args: candidate (string): A", "= card curr_count = 1 else: curr_count += 1 if (cards_dict[curr_card] < curr_count):", "n-r, -1), 1) denom = reduce(op.mul, range(1, r+1), 1) return numer // denom", "nCr(n, r): r = min(r, n-r) numer = reduce(op.mul, range(n, n-r, -1), 1)", "[i for i in range(52)] return res class LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards =", "of candidate target (string): A string representing the number of cards of target", "max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1] = 1 return one_hot def", "of Card object ''' res = [i for i in range(52)] return res", "1 return plane def get_one_hot_array(num_left_cards, max_num_cards): one_hot = np.zeros(max_num_cards, dtype=np.int8) one_hot[num_left_cards - 1]", "res = [i for i in range(52)] return res class LocalObjs(threading.local): def __init__(self):", "math from typing import List import threading import collections import itertools import numpy", "List[int]) -> np.ndarray: plane = np.zeros(52, dtype=int) for card_id in cards: plane[card_id] =", "curr_card = card curr_count = 1 else: curr_count += 1 if (cards_dict[curr_card] <", "from typing import List import threading import collections import itertools import numpy as", "reduce(op.mul, range(n, n-r, -1), 1) denom = reduce(op.mul, range(1, r+1), 1) return numer", "plane = np.zeros(num_players, dtype=int) for player_id in players: plane[player_id] = 1 return plane", "object ''' res = [i for i in range(52)] return res class LocalObjs(threading.local):", "LocalObjs(threading.local): def __init__(self): self.cached_candidate_cards = None _local_objs = LocalObjs() def contains_cards(candidate, target): '''", "continuous calls of this function # will test different targets against the same", "target (string): A string representing the number of cards of target Returns: boolean", "calls of this function # will test different targets against the same candidate.", "candidate. 
# So the cached counts of each card in candidate can speed", "the comparison for following tests if candidate keeps the same. if not _local_objs.cached_candidate_cards", "import threading import collections import itertools import numpy as np import operator as", "as op from functools import reduce def nPr(n, r): return math.factorial(n)/(math.factorial(n-r)) def nCr(n,", "1 if (cards_dict[curr_card] < curr_count): return False return True def encode_cards(cards: List[int]) ->", "min(r, n-r) numer = reduce(op.mul, range(n, n-r, -1), 1) denom = reduce(op.mul, range(1,", "def contains_cards(candidate, target): ''' Check if cards of candidate contains cards of target.", "< curr_count): return False curr_card = card curr_count = 1 else: curr_count +=", "np.ndarray: plane = np.zeros(52, dtype=int) for card_id in cards: plane[card_id] = 1 return", "candidate: _local_objs.cached_candidate_cards = candidate cards_dict = collections.defaultdict(int) for card in candidate: cards_dict[card] +=", "= min(r, n-r) numer = reduce(op.mul, range(n, n-r, -1), 1) denom = reduce(op.mul,", "target): ''' Check if cards of candidate contains cards of target. Args: candidate" ]
[ "from .utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def", "label else str() } ) return self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None):", "attach_to_subid, params=None): params = update_params( params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid }", ") return self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None): params = update_params( params,", ") return self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None): params = update_params( params,", "self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params = params if params else dict()", "params, 'POST') def delete(self, subid, params=None): params = update_params( params, {'SUBID' : subid", "params, {'DCID' : dcid, 'size_gb' : size_gb, 'label' : label if label else", "self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None): params = update_params( params, {'SUBID' :", "update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/detach', params, 'POST') def label_set(self,", "size_gb, label=None, params=None): params = update_params( params, {'DCID' : dcid, 'size_gb' : size_gb,", "label } ) return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params = params", "subid, 'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST') def create(self, dcid,", "= update_params( params, {'SUBID' : subid, 'label' : label } ) return self.request('/v1/block/label_set',", "calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None): params =", "params=None): params = update_params( params, {'SUBID' : subid, 'label' : label } )", "'size_gb' : size_gb, 'label' : label if label else str() } ) return", "update_params( params, {'SUBID' : subid, 'label' : 
label } ) return self.request('/v1/block/label_set', params,", "update_params( params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach', params,", "subid, 'label' : label } ) return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None):", "{'SUBID' : subid, 'label' : label } ) return self.request('/v1/block/label_set', params, 'POST') def", "if params else dict() return self.request('/v1/block/list', params, 'GET') def resize(self, params=None): raise NotImplementedError()", "calls''' from .utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls'''", "'POST') def label_set(self, subid, label, params=None): params = update_params( params, {'SUBID' : subid,", "params, {'SUBID' : subid } ) return self.request('/v1/block/delete', params, 'POST') def detach(self, subid,", ") return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params = params if params", "class to handle Vultr Account API calls''' from .utils import VultrBase, update_params class", "params=None): params = update_params( params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid } )", "params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST')", "self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb, label=None, params=None): params = update_params( params,", "params = update_params( params, {'SUBID' : subid, 'label' : label } ) return", "label, params=None): params = update_params( params, {'SUBID' : subid, 'label' : label }", "return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params = params if params else", "def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None): params = update_params(", ": label if label else str() } ) return 
self.request('/v1/block/create', params, 'POST') def", "VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def __init__(self, api_key): VultrBase.__init__(self,", "{'SUBID' : subid } ) return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label,", "params = update_params( params, {'DCID' : dcid, 'size_gb' : size_gb, 'label' : label", "__init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None): params = update_params( params,", "'''Partial class to handle Vultr Account API calls''' from .utils import VultrBase, update_params", "params, 'POST') def list(self, params=None): params = params if params else dict() return", "def delete(self, subid, params=None): params = update_params( params, {'SUBID' : subid } )", "to handle Vultr Account API calls''' from .utils import VultrBase, update_params class VultrBlockStore(VultrBase):", ".utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def __init__(self,", "self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None): params = update_params( params, {'SUBID' :", "'label' : label } ) return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params", "} ) return self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb, label=None, params=None): params", "api_key) def attach(self, subid, attach_to_subid, params=None): params = update_params( params, {'SUBID' : subid,", "params = update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/detach', params, 'POST')", "subid } ) return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label, params=None): params", "'POST') def create(self, dcid, size_gb, label=None, params=None): params = update_params( params, {'DCID' :", "create(self, dcid, size_gb, label=None, params=None): 
params = update_params( params, {'DCID' : dcid, 'size_gb'", "{'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST') def", "<gh_stars>1-10 '''Partial class to handle Vultr Account API calls''' from .utils import VultrBase,", "handle Vultr Account API calls''' from .utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles", "Account API calls''' from .utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account", "def attach(self, subid, attach_to_subid, params=None): params = update_params( params, {'SUBID' : subid, 'attach_to_SUBID'", "params=None): params = update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/detach', params,", "params, {'SUBID' : subid } ) return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid,", "label=None, params=None): params = update_params( params, {'DCID' : dcid, 'size_gb' : size_gb, 'label'", "return self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb, label=None, params=None): params = update_params(", "detach(self, subid, params=None): params = update_params( params, {'SUBID' : subid } ) return", "params=None): params = params if params else dict() return self.request('/v1/block/list', params, 'GET') def", "= update_params( params, {'DCID' : dcid, 'size_gb' : size_gb, 'label' : label if", "str() } ) return self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None): params =", "'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb,", "subid, params=None): params = update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/detach',", "'''Handles Vultr Account API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid,", "params, 'POST') def label_set(self, subid, label, params=None): params 
= update_params( params, {'SUBID' :", "{'SUBID' : subid } ) return self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None):", "attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb, label=None, params=None):", "return self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None): params = update_params( params, {'SUBID'", "= params if params else dict() return self.request('/v1/block/list', params, 'GET') def resize(self, params=None):", "api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None): params = update_params( params, {'SUBID'", ") return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label, params=None): params = update_params(", "params=None): params = update_params( params, {'DCID' : dcid, 'size_gb' : size_gb, 'label' :", "} ) return self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None): params = update_params(", "API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None): params", "params = update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/delete', params, 'POST')", "Account API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None):", "update_params( params, {'DCID' : dcid, 'size_gb' : size_gb, 'label' : label if label", ": subid } ) return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label, params=None):", "def list(self, params=None): params = params if params else dict() return self.request('/v1/block/list', params,", ": dcid, 'size_gb' : size_gb, 'label' : label if label else str() }", "VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self,", ": size_gb, 
'label' : label if label else str() } ) return self.request('/v1/block/create',", "if label else str() } ) return self.request('/v1/block/create', params, 'POST') def delete(self, subid,", "} ) return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params = params if", "Vultr Account API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid,", "label if label else str() } ) return self.request('/v1/block/create', params, 'POST') def delete(self,", "attach(self, subid, attach_to_subid, params=None): params = update_params( params, {'SUBID' : subid, 'attach_to_SUBID' :", "list(self, params=None): params = params if params else dict() return self.request('/v1/block/list', params, 'GET')", "subid } ) return self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None): params =", "subid, label, params=None): params = update_params( params, {'SUBID' : subid, 'label' : label", "def label_set(self, subid, label, params=None): params = update_params( params, {'SUBID' : subid, 'label'", "VultrBase.__init__(self, api_key) def attach(self, subid, attach_to_subid, params=None): params = update_params( params, {'SUBID' :", "params, {'SUBID' : subid, 'label' : label } ) return self.request('/v1/block/label_set', params, 'POST')", ": attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb, label=None,", "subid, attach_to_subid, params=None): params = update_params( params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid", "= update_params( params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach',", ": subid } ) return self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None): params", "'POST') def list(self, params=None): params = params if params else dict() return self.request('/v1/block/list',", "} ) return 
self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None): params = update_params(", "{'DCID' : dcid, 'size_gb' : size_gb, 'label' : label if label else str()", "} ) return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label, params=None): params =", "import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def __init__(self, api_key):", "class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key) def", "params, 'POST') def detach(self, subid, params=None): params = update_params( params, {'SUBID' : subid", "def create(self, dcid, size_gb, label=None, params=None): params = update_params( params, {'DCID' : dcid,", "params = update_params( params, {'SUBID' : subid, 'attach_to_SUBID' : attach_to_subid } ) return", "Vultr Account API calls''' from .utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr", ": subid, 'attach_to_SUBID' : attach_to_subid } ) return self.request('/v1/block/attach', params, 'POST') def create(self,", "'POST') def detach(self, subid, params=None): params = update_params( params, {'SUBID' : subid }", "update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API calls''' def __init__(self, api_key): VultrBase.__init__(self, api_key)", "'POST') def delete(self, subid, params=None): params = update_params( params, {'SUBID' : subid }", ") return self.request('/v1/block/attach', params, 'POST') def create(self, dcid, size_gb, label=None, params=None): params =", "else str() } ) return self.request('/v1/block/create', params, 'POST') def delete(self, subid, params=None): params", "def detach(self, subid, params=None): params = update_params( params, {'SUBID' : subid } )", "params = params if params else dict() return self.request('/v1/block/list', params, 'GET') def resize(self,", ": subid, 'label' : label } ) return 
self.request('/v1/block/label_set', params, 'POST') def list(self,", "= update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/delete', params, 'POST') def", "params, 'POST') def create(self, dcid, size_gb, label=None, params=None): params = update_params( params, {'DCID'", "'label' : label if label else str() } ) return self.request('/v1/block/create', params, 'POST')", "params=None): params = update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/delete', params,", "size_gb, 'label' : label if label else str() } ) return self.request('/v1/block/create', params,", "API calls''' from .utils import VultrBase, update_params class VultrBlockStore(VultrBase): '''Handles Vultr Account API", ": label } ) return self.request('/v1/block/label_set', params, 'POST') def list(self, params=None): params =", "update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/delete', params, 'POST') def detach(self,", "dcid, 'size_gb' : size_gb, 'label' : label if label else str() } )", "dcid, size_gb, label=None, params=None): params = update_params( params, {'DCID' : dcid, 'size_gb' :", "subid, params=None): params = update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/delete',", "params if params else dict() return self.request('/v1/block/list', params, 'GET') def resize(self, params=None): raise", "label_set(self, subid, label, params=None): params = update_params( params, {'SUBID' : subid, 'label' :", "= update_params( params, {'SUBID' : subid } ) return self.request('/v1/block/detach', params, 'POST') def", "return self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label, params=None): params = update_params( params,", "delete(self, subid, params=None): params = update_params( params, {'SUBID' : subid } ) return", "return self.request('/v1/block/delete', params, 'POST') def detach(self, subid, params=None): params = update_params( params, {'SUBID'", 
"self.request('/v1/block/detach', params, 'POST') def label_set(self, subid, label, params=None): params = update_params( params, {'SUBID'" ]
[ "self._connection).get(query, **kw) def get_location(self, query=None, name=None): if name is not None: query =", "Unless required by applicable law or agreed to in writing, software # distributed", "dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n = 100 nobs = len(obs) for", "def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid = None", "equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\": \"An", "\"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\":", "\"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def put(self, dry=False):", "_db_obj = None def __init__(self, payload, session, connection): self._payload = payload self._connection =", "fg=\"red\") class BaseST: iotid = None _db_obj = None def __init__(self, payload, session,", "\"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": {", "False, }, \"positionArray\": { \"description\": \"An array of positions\", \"type\": \"array\", \"items\": {\"$ref\":", "print( f\"Validation failed for {self.__class__.__name__}. {err}. 
{self._payload}\" ) def _generate_request( self, method, query=None,", "query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if expand: url", "{ \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\",", "\"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, },", "\"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\":", "thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = resp", "] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\"", "under the License. 
# =============================================================================== import os.path import click import yaml from requests", "\"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"},", "for i in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk = obs[i", "self._connection) thing.put(dry) return thing def add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection)", "from Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw ) if __name__ == \"__main__\": payload", "if resp and resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def", "\"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\":", "if name is not None: query = f\"name eq '{name}'\" try: return next(self.get_locations(query))", "thing=None): entity = None if thing: if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "get( self, query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if pages and", "if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\")", "== 200: return True def get( self, query, entity=None, pages=None, expand=None, limit=None, verbose=False,", "@property def base_url(self): return self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session, self._connection) return", "location.patch(dry) return location def get_sensors(self, query=None, name=None): if name is not None: query", "None: query = f\"name eq '{name}'\" yield from Sensors(None, self._session, 
self._connection).get(query) def get_observed_properties(self,", "1, yielded ) start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, )", "= os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as rfile: obj = yaml.load(rfile,", "**kw ) if verbose: if resp and resp.status_code not in (200, 201): print(f\"request={request}\")", "IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema = { \"type\":", "= self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity", "not None: query = f\"name eq '{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None,", "request, resp, dry=False): if request[\"method\"] == \"get\": if resp.status_code == 200: return resp.json()", "resp, dry=dry) class Things(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\":", "class Things(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": { \"name\":", "return True def patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request,", "\"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }", "self._session, self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self, iotid, payload, dry=False): location =", "'{name}'\" try: return next(self.get_locations(query)) except StopIteration: pass def get_thing(self, query=None, name=None, location=None): entity", "{\"type\": \"number\"}}, }, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\",", "\"description\", 
\"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self): name = self._payload[\"name\"]", "IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if self._validate_payload(): request", "records found\") return else: for v in resp[\"value\"]: if limit and yielded >=", "\"A single position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, },", "dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is not None: query", "self._connection func = getattr(self._session, request[\"method\"]) if not dry: resp = func( request[\"url\"], auth=(connection[\"user\"],", "= f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url} resp = self._send_request(request, json=pd, dry=dry)", "from jsonschema import validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg,", "entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"]", "self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield from Locations(None, self._session, self._connection).get(query, **kw)", "yield from Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw ) if __name__ == \"__main__\":", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "[\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, }, \"required\": [\"name\", \"description\", \"encodingType\",", "\"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\":", "iotid return True else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if resp.status_code ==", 
"name is not None: query = f\"name eq '{name}'\" try: return next(self.get_locations(query)) except", "\"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST):", "\"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An array of four", "self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no records", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "\"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self): name = self._payload[\"name\"] thing =", "None _db_obj = None def __init__(self, payload, session, connection): self._payload = payload self._connection", "4}], }, \"polygon\": { \"description\": \"An array of linear rings\", \"type\": \"array\", \"items\":", "\"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\":", "is None and method == \"get\": orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"]", "\"description\": \"A single position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False,", "== \"post\": if dry: return True if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\",", "class Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\":", "put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self,", "self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema = { \"type\": \"object\", \"required\":", "eq '{name}'\" try: return next(self.get_locations(query)) 
except StopIteration: pass def get_thing(self, query=None, name=None, location=None):", "'{name}'\" yield from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name is", "check_exists and self.exists(): return self.patch() else: request = self._generate_request(\"post\") print(request) resp = self._send_request(request,", "= Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def put_location(self, payload, dry=False): location =", "1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp", "__init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if", "\"number\"}}, }, }, } class ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\": [\"observations\",", "+ 1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\")", "with open(p, \"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]:", "for {self.__class__.__name__}. {err}. 
{self._payload}\" ) def _generate_request( self, method, query=None, entity=None, orderby=None, expand=None,", "try: next_url = resp[\"@iot.nextLink\"] except KeyError: return yield from get_items( {\"method\": \"get\", \"url\":", "if verbose: if resp and resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return", "{\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\":", "obs = self._payload[\"observations\"] n = 100 nobs = len(obs) for i in range(0,", "return yielded += 1 yield v try: next_url = resp[\"@iot.nextLink\"] except KeyError: return", "limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if query:", "resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema =", "\"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"],", "resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = resp except", "\"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema =", "next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity", "in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request, resp, dry=False): if", "\"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self): name = self._payload[\"name\"] thing", "= \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = 
f\"https://{base_url}/FROST-Server/v1.1\" if", "def _generate_request( self, method, query=None, entity=None, orderby=None, expand=None, limit=None ): if orderby is", "\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def exists(self): name =", "return True else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if resp.status_code == 200:", "os.path.isfile(p): with open(p, \"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not", "= f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url} resp =", "**kw): try: return next(self.get(*args, **kw)) except StopIteration: return def exists(self): name = self._payload[\"name\"]", "class Locations(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\":", "positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An array of", "\"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\":", "Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload,", "put_location(self, payload, dry=False): location = Locations(payload, self._session, self._connection) location.put(dry) return location def put_thing(self,", "obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def add_observation(self, payload, dry=False): obs", "self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session,", "def __init__(self, payload, session, connection): self._payload = payload self._connection = connection self._session =", 
"if name is not None: query = f\"name eq '{name}'\" return next(self.get_things(query, entity=entity))", "not use this file except in compliance with the License. # You may", "if resp: try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return", "\"url\": url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client: def", "{\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\":", "entity is None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if method == \"patch\":", "obs = Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self, iotid, payload,", "query=None, name=None, location=None): entity = None if location: if isinstance(location, dict): location =", "if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\",", "Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw ) if __name__ == \"__main__\": payload =", "session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except ValidationError as err: print(", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True):", "return obs def put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return", "\"get\": if resp.status_code == 200: return resp.json() elif request[\"method\"] == \"post\": if dry:", "agreed to in writing, software # distributed under the License is distributed on", "**kw): connection = self._connection func = 
getattr(self._session, request[\"method\"]) if not dry: resp =", "\"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\": \"An array of positions\",", "abs(pages) orderby = \"$orderby=id desc\" def get_items(request, page_count, yielded): if pages: if page_count", "a base url for a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1]", "get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw):", "}, } def put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n = 100", "get_things(self, query=None, **kw): yield from Things(None, self._session, self._connection).get(query, **kw) def get_location(self, query=None, name=None):", "**kw) def get_things(self, query=None, **kw): yield from Things(None, self._session, self._connection).get(query, **kw) def get_location(self,", "eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp except IndexError: return self.iotid", "return if verbose: pv = \"\" if pages: pv = \"/{pages}\" verbose_message( f\"getting", "print(request) resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) def getfirst(self, *args,", "= f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw ) if __name__", "= self._send_request(request) resp = self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return if", "self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\"],", "\"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def exists(self): name", "self._session, self._connection).get(query, **kw) def 
get_location(self, query=None, name=None): if name is not None: query", "{\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\",", "def exists(self): name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp =", "\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def put(self, dry=False): if", "session, connection): self._payload = payload self._connection = connection self._session = session def _validate_payload(self):", "request[\"method\"] == \"post\": if dry: return True if resp.status_code == 201: m =", "= self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp:", "url} def _send_request(self, request, dry=False, verbose=True, **kw): connection = self._connection func = getattr(self._session,", "{ \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"],", "to in writing, software # distributed under the License is distributed on an", "if orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\":", "implied. 
# See the License for the specific language governing permissions and #", "func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if resp and resp.status_code not", "as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please", "resp[\"value\"]: warning(\"no records found\") return else: for v in resp[\"value\"]: if limit and", "{\"minItems\": 4}], }, \"polygon\": { \"description\": \"An array of linear rings\", \"type\": \"array\",", "\"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"},", "\"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\": \"An array", "\"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"},", "yaml.dump(self._connection, wfile) self._session = Session() @property def base_url(self): return self._connection[\"base_url\"] def locations(self): loc", "yield from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name is not", "exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name", "\"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"},", "# =============================================================================== import os.path import click import yaml from requests import Session from", "\"An array of four positions where the first equals the last\", \"allOf\": [{\"$ref\":", "if not dry: resp = func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw 
) if verbose:", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "}, } def exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"]", "201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request, resp, dry=False): if request[\"method\"] ==", "StopIteration: return def exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\") if", "payload, dry=False): location = Locations(payload, self._session, self._connection) location.put(dry) return location def put_thing(self, payload,", "\"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\": \"object\",", "\"position\": { \"description\": \"A single position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"},", "\"number\"}}, }, }, } def exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "failed for {self.__class__.__name__}. {err}. 
{self._payload}\" ) def _generate_request( self, method, query=None, entity=None, orderby=None,", "verbose=True) def put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor", "put_thing(self, payload, dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry) return thing def add_observations(self,", "self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST):", "= self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema = {", "\"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\":", "None: query = f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw):", ") yield from get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True): if self._validate_payload(): if", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "\"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}},", "} class ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\":", "self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\":", "f\"name eq '{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield", "{ \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\":", "_validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except ValidationError as err: print( f\"Validation failed", "}, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, },", "self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield from Locations(None, self._session, self._connection).get(query, **kw) def", "verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request,", "thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if", "params = [] if limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby =", "\"patch\": if resp.status_code == 200: return True def get( self, query, entity=None, pages=None,", "next_url = resp[\"@iot.nextLink\"] except KeyError: return yield from get_items( {\"method\": \"get\", \"url\": next_url},", "} class 
Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"},", "dry=False): obs = Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self, iotid,", "\"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\":", "location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is not None: query = f\"name eq", "return location def put_thing(self, payload, dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry) return", "limit and yielded >= limit: return yielded += 1 yield v try: next_url", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "yielded += 1 yield v try: next_url = resp[\"@iot.nextLink\"] except KeyError: return yield", "Observations(BaseST): _schema = { \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": {", "= f\"name eq '{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw):", "query = f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if", "self._session, self._connection).get( None, entity=entity, **kw ) if __name__ == \"__main__\": payload = {}", "is not None: query = f\"name eq '{name}'\" yield from Sensors(None, self._session, self._connection).get(query)", "user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if not base_url:", "\"\" if pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\"", "[] if limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby)", "print(\"loading 
chunk {}/{}\".format(i, nobs)) chunk = obs[i : i + n] pd =", "self._connection).get( None, entity=entity, **kw ) if __name__ == \"__main__\": payload = {} l", "if self._validate_payload(): obs = self._payload[\"observations\"] n = 100 nobs = len(obs) for i", "\"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]},", "iotid = m.group(\"id\")[1:-1] self.iotid = iotid return True else: print(resp.status_code, resp.text) elif request[\"method\"]", "except KeyError: return yield from get_items( {\"method\": \"get\", \"url\": next_url}, page_count + 1,", "p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as rfile: obj =", "f\"Validation failed for {self.__class__.__name__}. {err}. {self._payload}\" ) def _generate_request( self, method, query=None, entity=None,", "pages: return if verbose: pv = \"\" if pages: pv = \"/{pages}\" verbose_message(", "dry=dry) def getfirst(self, *args, **kw): try: return next(self.get(*args, **kw)) except StopIteration: return def", "= f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url =", "= location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj =", "chunk, } ] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url", "query=None, **kw): yield from Things(None, self._session, self._connection).get(query, **kw) def get_location(self, query=None, name=None): if", "request = {\"method\": \"post\", \"url\": url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp,", "if pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\" )", "}, }, } class Sensors(BaseST): _schema = { 
\"type\": \"object\", \"required\": [\"name\", \"description\",", "\"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\":", "= re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid", "= resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema", "\"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema = { \"type\": \"object\",", "f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def _send_request(self,", "name is not None: query = f\"name eq '{name}'\" yield from Sensors(None, self._session,", "self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client: def __init__(self, base_url=None, user=None, pwd=None):", "\"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST): _schema = { \"type\":", "self._parse_response(request, resp, dry=dry) class Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\":", "if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is not", "from get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists and", "name=None, location=None): entity = None if location: if isinstance(location, dict): location = location[\"@<EMAIL>\"]", "**kw)) except StopIteration: return def exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name eq", "}, \"polygon\": { \"description\": \"An array of linear rings\", \"type\": \"array\", \"items\": {\"$ref\":", "\"location\"], \"definitions\": { \"position\": { \"description\": \"A single position\", 
\"type\": \"array\", \"minItems\": 2,", "entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if method == \"patch\": url = f\"{url}({self.iotid})\"", "def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except ValidationError as err: print( f\"Validation", "{\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\":", "\"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"},", "\"patch\": url = f\"{url}({self.iotid})\" else: params = [] if limit: params.append(f\"$top={limit}\") if orderby:", "entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"]", "2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\": \"An array of", "orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\"", "url = f\"{base_url}/{entity}\" if method == \"patch\": url = f\"{url}({self.iotid})\" else: params =", "query=None, **kw): yield from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield", "\"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\", \"properties\":", "= Session() @property def base_url(self): return self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session,", "self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self, iotid, payload, dry=False): location = Locations(payload,", "resp = self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj = resp except IndexError:", "return self.iotid = 
self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if self._validate_payload(): request =", "orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params:", "= obs[i : i + n] pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\":", "fg=\"red\") return if not resp[\"value\"]: warning(\"no records found\") return else: for v in", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema =", "\"$orderby=id asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity", "\"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\",", "{\"method\": method, \"url\": url} def _send_request(self, request, dry=False, verbose=True, **kw): connection = self._connection", "<filename>sta/client.py # =============================================================================== # Copyright 2021 ross # # Licensed under the Apache", "} ] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url =", "\"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\", \"properties\": {", "{\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, },", "rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please enter", "self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = 
resp except IndexError: return", "the specific language governing permissions and # limitations under the License. # ===============================================================================", "0: pages = abs(pages) orderby = \"$orderby=id desc\" def get_items(request, page_count, yielded): if", "{\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\": \"An array of positions\", \"type\":", "{\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, },", "def get_thing(self, query=None, name=None, location=None): entity = None if location: if isinstance(location, dict):", "payload, dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload,", "{\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"],", "See the License for the specific language governing permissions and # limitations under", "resp.status_code == 200: return resp.json() elif request[\"method\"] == \"post\": if dry: return True", "{\"type\": \"number\"}}, }, }, } def exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0]", "getattr(self._session, request[\"method\"]) if not dry: resp = func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw )", "class BaseST: iotid = None _db_obj = None def __init__(self, payload, session, connection):", "sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry)", "f\"{base_url}/{entity}\" if method == \"patch\": url = f\"{url}({self.iotid})\" else: params = [] if", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "not orderby.startswith(\"$orderby\"): orderby = 
f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if", "= self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST):", "True except ValidationError as err: print( f\"Validation failed for {self.__class__.__name__}. {err}. {self._payload}\" )", "entity = \"Locations({})/Things\".format(location) if name is not None: query = f\"name eq '{name}'\"", "\"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, }, \"required\": [\"name\",", "isinstance(location, dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is not None:", "0, 0) def put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists and self.exists(): return", "of two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": {", "{ \"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\":", "dry=dry) return self._parse_response(request, resp, dry=dry) def getfirst(self, *args, **kw): try: return next(self.get(*args, **kw))", "def put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def", "pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if pages and pages < 0: pages", "dry=dry) class Things(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": {", "json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) def getfirst(self, *args, **kw): try: return next(self.get(*args,", "query=None, name=None): if name is not None: query = f\"name eq '{name}'\" yield", "as err: print( 
f\"Validation failed for {self.__class__.__name__}. {err}. {self._payload}\" ) def _generate_request( self,", "if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as", "from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield from Things(None, self._session,", "\"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An array of four positions where", "== \"__main__\": payload = {} l = Locations(payload, None, None) l._validate_payload() # =============", "def getfirst(self, *args, **kw): try: return next(self.get(*args, **kw)) except StopIteration: return def exists(self):", "\"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\":", "\"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\":", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no records found\") return else: for", "start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request,", "self._session, self._connection) location.put(dry) return location def put_thing(self, payload, dry=False): thing = Things(payload, self._session,", "== 200: return resp.json() elif request[\"method\"] == \"post\": if dry: return True if", "m.group(\"id\")[1:-1] self.iotid = iotid return True else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\":", "= self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema = { \"type\": \"object\", \"required\": [\"phenomenonTime\",", "payload, dry=False): 
location = Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry) return location", "base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request", "\"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\":", "method, \"url\": url} def _send_request(self, request, dry=False, verbose=True, **kw): connection = self._connection func", "= len(obs) for i in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk", "IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid return True else:", "Locations(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\":", "if orderby is None and method == \"get\": orderby = \"$orderby=id asc\" base_url", "params: url = f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\":", "f\"getting page={page_count + 1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"])", "import validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def", "a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with", ") if verbose: if resp and resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\")", "\"user\": user, \"pwd\": <PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p):", "func = getattr(self._session, request[\"method\"]) if not dry: resp = func( request[\"url\"], auth=(connection[\"user\"], 
connection[\"pwd\"]),", "class ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": {", "location def put_thing(self, payload, dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry) return thing", "first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\":", "if check_exists and self.exists(): return self.patch() else: request = self._generate_request(\"post\") print(request) resp =", "KIND, either express or implied. # See the License for the specific language", "yield from get_items( {\"method\": \"get\", \"url\": next_url}, page_count + 1, yielded ) start_request", "\"$orderby=id desc\" def get_items(request, page_count, yielded): if pages: if page_count >= pages: return", "{\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}},", "resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) def getfirst(self, *args, **kw):", "if self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp,", "\"description\": \"An array of linear rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, },", "= { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\":", "return resp.json() elif request[\"method\"] == \"post\": if dry: return True if resp.status_code ==", "{\"@iot.id\": {\"type\": \"number\"}}, }, }, } def put(self, dry=False): if self._validate_payload(): obs =", "# verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # 
verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp =", "\"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid return True else: print(resp.status_code,", "[\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def exists(self): name = self._payload[\"name\"]", "return self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema = { \"type\": \"object\",", "ANY KIND, either express or implied. # See the License for the specific", "resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp except", ") def _generate_request( self, method, query=None, entity=None, orderby=None, expand=None, limit=None ): if orderby", "\"url\": url} def _send_request(self, request, dry=False, verbose=True, **kw): connection = self._connection func =", "\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\",", "verbose: pv = \"\" if pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count +", "f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\"", "datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get( None, entity=entity,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is not None:", "'{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp except IndexError: return self.iotid =", "base url for a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"]", "\"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\",", "\"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\":", "os.path import click import yaml from requests import Session from jsonschema import validate,", "rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema =", "verbose: if resp and resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp", "expand=None, limit=None ): if orderby is None and method == \"get\": orderby =", "click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no records found\") return else: for v", "{ \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, } class", "dry=False): if request[\"method\"] == \"get\": if resp.status_code == 200: return resp.json() elif request[\"method\"]", "\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"],", "'{name}'\") if resp: try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"]", "{ \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\":", "put_observed_property(self, payload, dry=False): obs = 
ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def put_datastream(self,", "}, }, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\":", "= iotid return True else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if resp.status_code", "Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): sensor = Sensors(payload,", "\"post\": if dry: return True if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\"))", "\"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, },", "pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url", "click.secho(msg, fg=\"red\") class BaseST: iotid = None _db_obj = None def __init__(self, payload,", "specific language governing permissions and # limitations under the License. 
# =============================================================================== import", "{ \"position\": { \"description\": \"A single position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\":", ">= limit: return yielded += 1 yield v try: next_url = resp[\"@iot.nextLink\"] except", "\"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\",", "{\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\":", "if limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if", "Locations(payload, self._session, self._connection) location.put(dry) return location def put_thing(self, payload, dry=False): thing = Things(payload,", "isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get(", "page_count >= pages: return if verbose: pv = \"\" if pages: pv =", "pages = abs(pages) orderby = \"$orderby=id desc\" def get_items(request, page_count, yielded): if pages:", "<PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\")", "is not None: query = f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self,", "\"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, },", "\"required\": [\"name\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": {", "def get_location(self, query=None, name=None): if name is not None: query = f\"name eq", "self._payload[\"name\"] resp = 
self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj = resp except", "\"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"},", "\"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": {", "thing def add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs", "= self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp:", "not None: query = f\"name eq '{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query) def", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "{\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, }, \"required\": [\"name\", \"description\",", "dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def put_datastream(self, payload, dry=False): datastream", "201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid", "base_url = input(\"Please enter a base url for a SensorThings instance>> \") if", "not None: query = f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream,", "applicable law or agreed to in writing, software # distributed under the License", "name=None): if name is not None: query = f\"name eq '{name}'\" try: return", "class Sensors(BaseST): 
_schema = { \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\":", "request[\"method\"] == \"get\": if resp.status_code == 200: return resp.json() elif request[\"method\"] == \"post\":", ": i + n] pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\":", "KeyError: return yield from get_items( {\"method\": \"get\", \"url\": next_url}, page_count + 1, yielded", "\"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def put(self, dry=False): if self._validate_payload(): obs", "position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": {", "location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is not None: query =", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "[{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An array of four positions", "pwd=None): self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if not base_url: p", "\"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self):", "enter a base url for a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url =", "eq '{name}'\" yield from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name", "\"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\": \"An array of linear rings\", \"type\":", "{\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\", \"required\":", "}, \"linearRing\": { \"description\": \"An array of four positions where the first equals", "obj = 
yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please enter a", "entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity =", "base_url(self): return self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session, self._connection) return loc.get(None, verbose=True)", "writing, software # distributed under the License is distributed on an \"AS IS\"", "\"string\"}, \"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, },", "if name is not None: query = f\"name eq '{name}'\" yield from Sensors(None,", "name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq", "def get_things(self, query=None, **kw): yield from Things(None, self._session, self._connection).get(query, **kw) def get_location(self, query=None,", "except StopIteration: pass def get_thing(self, query=None, name=None, location=None): entity = None if location:", "IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema = { \"type\":", "{\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema", "compliance with the License. # You may obtain a copy of the License", "\"lineString\": { \"description\": \"An array of two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"},", "{err}. 
{self._payload}\" ) def _generate_request( self, method, query=None, entity=None, orderby=None, expand=None, limit=None ):", "yaml from requests import Session from jsonschema import validate, ValidationError import re IDREGEX", "{ \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\":", "= f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def _send_request(self, request, dry=False, verbose=True, **kw):", "more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An array", "self._validate_payload(): obs = self._payload[\"observations\"] n = 100 nobs = len(obs) for i in", "resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema =", "\"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"],", "instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\")", "{ \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\":", "asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is", "= self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\",", "resp[\"value\"]: if limit and yielded >= limit: return yielded += 1 yield v", "\"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, }", "if location: if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name", 
"self.patch() else: request = self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request,", "return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\",", "= f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def", "\"string\"}, \"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema = { \"type\": \"object\",", "= thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj =", "\"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\":", "\"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}},", "warning(\"no records found\") return else: for v in resp[\"value\"]: if limit and yielded", "self._connection) obs.put(dry) return obs def add_observation(self, payload, dry=False): obs = Observations(payload, self._session, self._connection)", "if dry: return True if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if", "\"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\": \"A single position\",", "ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def put_datastream(self, payload, dry=False): datastream = Datastreams(payload,", "200: return resp.json() elif request[\"method\"] == \"post\": if dry: return True if resp.status_code", "= Locations(payload, self._session, self._connection) location.put(dry) return location def put_thing(self, payload, dry=False): thing =", "**kw): yield from Things(None, 
self._session, self._connection).get(query, **kw) def get_location(self, query=None, name=None): if name", "return True class Locations(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\":", "where the first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\":", "\"url\": next_url}, page_count + 1, yielded ) start_request = self._generate_request( \"get\", query=query, entity=entity,", "import Session from jsonschema import validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def", "try: validate(instance=self._payload, schema=self._schema) return True except ValidationError as err: print( f\"Validation failed for", "self._connection[\"base_url\"] = base_url with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session = Session()", "yield from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield from Things(None,", "\"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST): _schema", "\"definitions\": { \"position\": { \"description\": \"A single position\", \"type\": \"array\", \"minItems\": 2, \"items\":", "self.__class__.__name__ url = f\"{base_url}/{entity}\" if method == \"patch\": url = f\"{url}({self.iotid})\" else: params", "self._session, self._connection) obs.put(dry) return obs def put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session,", "StopIteration: pass def get_thing(self, query=None, name=None, location=None): entity = None if location: if", "(the \"License\"); # you may not use this file except in compliance with", "v in resp[\"value\"]: if limit and yielded >= limit: return yielded += 1", "**kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield from", "pages < 0: 
pages = abs(pages) orderby = \"$orderby=id desc\" def get_items(request, page_count,", "if limit and yielded >= limit: return yielded += 1 yield v try:", "# Unless required by applicable law or agreed to in writing, software #", "f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream, dict):", "datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def put_location(self, payload, dry=False): location", "by applicable law or agreed to in writing, software # distributed under the", "in resp[\"value\"]: if limit and yielded >= limit: return yielded += 1 yield", "dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get( None,", "\"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema =", "def get( self, query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if pages", "def put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def", "url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client: def __init__(self,", "}, \"positionArray\": { \"description\": \"An array of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"},", "not resp[\"value\"]: warning(\"no records found\") return else: for v in resp[\"value\"]: if limit", "file except in compliance with the License. 
# You may obtain a copy", "\"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [ \"name\",", "== 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid =", "=============================================================================== # Copyright 2021 ross # # Licensed under the Apache License, Version", "\"get\", \"url\": next_url}, page_count + 1, yielded ) start_request = self._generate_request( \"get\", query=query,", "[\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\": \"A single position\", \"type\":", "def base_url(self): return self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session, self._connection) return loc.get(None,", "Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request, resp)", "url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url} resp = self._send_request(request, json=pd,", "if expand: url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def _send_request(self, request,", "f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if method", "return self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def", "self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session, self._connection)", "\"pwd\": <PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p,", "is not None: query = f\"name eq '{name}'\" return next(self.get_things(query, 
entity=entity)) def get_datastream(self,", "next(self.get_locations(query)) except StopIteration: pass def get_thing(self, query=None, name=None, location=None): entity = None if", "return True except ValidationError as err: print( f\"Validation failed for {self.__class__.__name__}. {err}. {self._payload}\"", "dry=False, check_exists=True): if self._validate_payload(): if check_exists and self.exists(): return self.patch() else: request =", "self._session, self._connection) datastream.put(dry) return datastream def put_location(self, payload, dry=False): location = Locations(payload, self._session,", "base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\") as wfile: yaml.dump(self._connection,", "< 0: pages = abs(pages) orderby = \"$orderby=id desc\" def get_items(request, page_count, yielded):", "query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if pages and pages <", "}, }, ], }, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\":", "def exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\") if resp: try:", "request[\"method\"] == \"patch\": if resp.status_code == 200: return True def get( self, query,", "\"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\":", "= self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\",", "if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is not", "Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name is not None: query", "\"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": 
\"#/definitions/position\"}, }, },", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "import click import yaml from requests import Session from jsonschema import validate, ValidationError", "{\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema = { \"type\":", "{ \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"},", "expand=None, limit=None, verbose=False, orderby=None, ): if pages and pages < 0: pages =", "m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid return", "open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session = Session() @property def base_url(self): return", "self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True def patch(self,", "{\"method\": \"post\", \"url\": url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class", "from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query,", "from Things(None, self._session, self._connection).get(query, **kw) def get_location(self, query=None, name=None): if name is not", ") # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp", "} def exists(self): name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp", "yield from get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists", "eq '{name}'\" yield 
from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from", "{\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\":", "'{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None, thing=None): entity = None if", "= self._payload[\"observations\"] n = 100 nobs = len(obs) for i in range(0, nobs,", "{ \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\":", "requests import Session from jsonschema import validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\")", "def get_locations(self, query=None, **kw): yield from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None,", "def put_thing(self, payload, dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry) return thing def", "for v in resp[\"value\"]: if limit and yielded >= limit: return yielded +=", "= self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj = resp", "err: print( f\"Validation failed for {self.__class__.__name__}. {err}. 
{self._payload}\" ) def _generate_request( self, method,", "self._validate_payload(): if check_exists and self.exists(): return self.patch() else: request = self._generate_request(\"post\") print(request) resp", "}, }, } def put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n =", "\"An array of linear rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, }", "from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield from Locations(None, self._session,", "re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class", "verbose=False, orderby=None, ): if pages and pages < 0: pages = abs(pages) orderby", "{ \"description\": \"An array of two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\":", "\"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\": \"An array of positions\", \"type\": \"array\",", "dry=dry) self._parse_response(request, resp, dry=dry) class Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection =", "getfirst(self, *args, **kw): try: return next(self.get(*args, **kw)) except StopIteration: return def exists(self): name", "# =============================================================================== # Copyright 2021 ross # # Licensed under the Apache License,", "\"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [", "\"Locations({})/Things\".format(location) if name is not None: query = f\"name eq '{name}'\" return next(self.get_things(query,", "if thing: if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name", "f\"Datastreams({datastream})/Observations\" yield from 
Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw ) if __name__ ==", "self, query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if pages and pages", "{\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}},", "= yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please enter a base", "self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw) def", "locations(self): loc = Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False):", "obs.put(dry, check_exists=False) return obs def patch_location(self, iotid, payload, dry=False): location = Locations(payload, self._session,", "__name__ == \"__main__\": payload = {} l = Locations(payload, None, None) l._validate_payload() #", "v try: next_url = resp[\"@iot.nextLink\"] except KeyError: return yield from get_items( {\"method\": \"get\",", "None: query = f\"name eq '{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self,", "last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\": \"An array of", "obs def patch_location(self, iotid, payload, dry=False): location = Locations(payload, self._session, self._connection) location.iotid =", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "= resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True def patch(self, dry=False):", "dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry) return thing def add_observations(self, payload, dry=False):", "\"object\", 
\"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"},", "obs.put(dry) return obs def add_observation(self, payload, dry=False): obs = Observations(payload, self._session, self._connection) obs.put(dry,", "query = f\"name eq '{name}'\" try: return next(self.get_locations(query)) except StopIteration: pass def get_thing(self,", "not dry: resp = func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if", "connection self._session = session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except ValidationError", "\"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\":", "four positions where the first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}],", "resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client: def __init__(self, base_url=None,", "): if orderby is None and method == \"get\": orderby = \"$orderby=id asc\"", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "return thing def add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return", "method == \"patch\": url = f\"{url}({self.iotid})\" else: params = [] if limit: params.append(f\"$top={limit}\")", "\"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\": \"A single position\", \"type\": \"array\",", "\"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def exists(self): name = self._payload[\"name\"] location", "query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield", "\"string\"}, }, } class 
Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\":", "= ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def put_datastream(self, payload, dry=False): datastream =", "query = f\"name eq '{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None,", "dry=False): location = Locations(payload, self._session, self._connection) location.put(dry) return location def put_thing(self, payload, dry=False):", "connection[\"pwd\"]), **kw ) if verbose: if resp and resp.status_code not in (200, 201):", "# limitations under the License. # =============================================================================== import os.path import click import yaml", "query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request, 0, 0) def put(self,", "\"w\") as wfile: yaml.dump(self._connection, wfile) self._session = Session() @property def base_url(self): return self._connection[\"base_url\"]", "get_thing(self, query=None, name=None, location=None): entity = None if location: if isinstance(location, dict): location", "self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema = { \"type\":", "if pages: if page_count >= pages: return if verbose: pv = \"\" if", "limit=None ): if orderby is None and method == \"get\": orderby = \"$orderby=id", "n] pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ]", "\"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, },", "\"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ],", "elif request[\"method\"] == \"patch\": if 
resp.status_code == 200: return True def get( self,", "f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url} resp = self._send_request(request,", "\"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url =", "if __name__ == \"__main__\": payload = {} l = Locations(payload, None, None) l._validate_payload()", "}, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\": \"A", "\"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\":", "query=None, entity=None, orderby=None, expand=None, limit=None ): if orderby is None and method ==", "\"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\",", "the License for the specific language governing permissions and # limitations under the", "Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please enter a base url for", "License. 
# =============================================================================== import os.path import click import yaml from requests import Session", "entity=entity, **kw ) if __name__ == \"__main__\": payload = {} l = Locations(payload,", "datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw )", "\"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, },", "import os.path import click import yaml from requests import Session from jsonschema import", "100 nobs = len(obs) for i in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i,", "self.iotid = iotid return True else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if", "pages: if page_count >= pages: return if verbose: pv = \"\" if pages:", "return sensor def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry) return", "{\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\":", "yielded): if pages: if page_count >= pages: return if verbose: pv = \"\"", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "resp, dry=False): if request[\"method\"] == \"get\": if resp.status_code == 200: return resp.json() elif", "= { \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\":", "try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class", "**kw) def get_location(self, query=None, name=None): if name is not None: query = f\"name", "\"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\",", "self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try:", "params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\"", "def get_sensors(self, query=None, name=None): if name is not None: query = f\"name eq", "[\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\":", "yield from Things(None, self._session, self._connection).get(query, **kw) def get_location(self, query=None, name=None): if name is", "\"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"] if not", "if not resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no records found\") return", "patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return", "- url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp 
=", "self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp =", "\"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST):", "self._session = session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except ValidationError as", "location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if", "check_exists=False) return obs def patch_location(self, iotid, payload, dry=False): location = Locations(payload, self._session, self._connection)", "location = Locations(payload, self._session, self._connection) location.put(dry) return location def put_thing(self, payload, dry=False): thing", "location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp", "\"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\":", "get_items( {\"method\": \"get\", \"url\": next_url}, page_count + 1, yielded ) start_request = self._generate_request(", "entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request, 0, 0) def put(self, dry=False,", "url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def _send_request(self, request, dry=False, verbose=True,", "Version 2.0 (the \"License\"); # you may not use this file except in", "loc = Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): sensor", "wfile: yaml.dump(self._connection, wfile) self._session = Session() @property def base_url(self): return self._connection[\"base_url\"] def locations(self):", "connection = 
self._connection func = getattr(self._session, request[\"method\"]) if not dry: resp = func(", "f\"{url}({self.iotid})\" else: params = [] if limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"):", "\"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\":", "base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile)", "dry=dry) class Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\":", "not None: query = f\"name eq '{name}'\" yield from Sensors(None, self._session, self._connection).get(query) def", "def __init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>}", "= self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client: def __init__(self, base_url=None, user=None,", "\"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\")", "expand: url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def _send_request(self, request, dry=False,", "\"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, },", "): if pages and pages < 0: pages = abs(pages) orderby = \"$orderby=id", "}, } class ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"],", "**kw): yield from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield from", "**kw ) if __name__ == \"__main__\": payload = {} l = Locations(payload, None,", "\"properties\": { \"name\": {\"type\": \"string\"}, 
\"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\":", "f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request,", "location = Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry) return location def get_sensors(self,", "base_url, \"user\": user, \"pwd\": <PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if", "\"ObservedProperty\", \"Sensor\", ], } def exists(self): name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid", "validate(instance=self._payload, schema=self._schema) return True except ValidationError as err: print( f\"Validation failed for {self.__class__.__name__}.", "dry: resp = func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if resp", "if verbose: pv = \"\" if pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count", "\"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def put(self,", "f\"Things({thing})/Datastreams\" if name is not None: query = f\"name eq '{name}'\" return next(self.get_datastreams(query,", "\"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\":", "i + n] pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk,", "ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw)", "name is not None: query = f\"name eq '{name}'\" return next(self.get_things(query, entity=entity)) def", "\"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": 
\"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"],", "True class Locations(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"},", "connection): self._payload = payload self._connection = connection self._session = session def _validate_payload(self): try:", "limit=limit, ) yield from get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True): if self._validate_payload():", "\"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema = {", "self._payload = payload self._connection = connection self._session = session def _validate_payload(self): try: validate(instance=self._payload,", "if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity = self.__class__.__name__", "def put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists and self.exists(): return self.patch() else:", "'{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = resp except IndexError: return self.iotid =", "= [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url =", "} class ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\":", "return datastream def put_location(self, payload, dry=False): location = Locations(payload, self._session, self._connection) location.put(dry) return", "as wfile: yaml.dump(self._connection, wfile) self._session = Session() @property def base_url(self): return self._connection[\"base_url\"] def", "\"object\", \"required\": [\"name\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\":", "return next(self.get_things(query, entity=entity)) def get_datastream(self, 
query=None, name=None, thing=None): entity = None if thing:", "= { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"},", "return self.patch() else: request = self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry) return", ">= pages: return if verbose: pv = \"\" if pages: pv = \"/{pages}\"", "Things(None, self._session, self._connection).get(query, **kw) def get_location(self, query=None, name=None): if name is not None:", "{\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An array of four positions where the", "{ \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\":", "obs[i : i + n] pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"],", "return resp def _parse_response(self, request, resp, dry=False): if request[\"method\"] == \"get\": if resp.status_code", "resp, dry=dry) class Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url,", "\"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\",", "self._connection) location.put(dry) return location def put_thing(self, payload, dry=False): thing = Things(payload, self._session, self._connection)", "{\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array of two or more positions\",", "url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request)", "base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity = 
self.__class__.__name__ url =", "self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try:", "datastream.put(dry) return datastream def put_location(self, payload, dry=False): location = Locations(payload, self._session, self._connection) location.put(dry)", "except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema = {", "f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url} def _send_request(self, request, dry=False, verbose=True, **kw): connection", "sensor def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs", "True if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid =", "f\"name eq '{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None, thing=None): entity =", "= \"Locations({})/Things\".format(location) if name is not None: query = f\"name eq '{name}'\" return", "get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists and self.exists():", "(200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request, resp, dry=False): if request[\"method\"]", "OF ANY KIND, either express or implied. 
# See the License for the", "resp.text) elif request[\"method\"] == \"patch\": if resp.status_code == 200: return True def get(", "\"An array of two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], },", "# verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request, resp) if not", "m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid return True else: print(resp.status_code, resp.text) elif", "if self._validate_payload(): if check_exists and self.exists(): return self.patch() else: request = self._generate_request(\"post\") print(request)", "{\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, },", "get_observed_properties(self, query=None, name=None): if name is not None: query = f\"name eq '{name}'\"", "resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid", "\"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\": \"A single position\", \"type\": \"array\", \"minItems\":", "if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\") as wfile:", "self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload, self._session, self._connection)", "=============================================================================== import os.path import click import yaml from requests import Session from jsonschema", "200: return True def get( self, query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None,", "return self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema = { \"type\": \"object\",", "except ValidationError as err: print( 
f\"Validation failed for {self.__class__.__name__}. {err}. {self._payload}\" ) def", "\"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}},", "}, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\":", "def _send_request(self, request, dry=False, verbose=True, **kw): connection = self._connection func = getattr(self._session, request[\"method\"])", "{ \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\":", "\"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\":", "else: params = [] if limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby", "thing: if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is", "\"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\":", "self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Observations(BaseST):", "payload, dry=False): obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def put_datastream(self, payload,", "\"number\"}}, }, }, } def put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n", "\") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\") as", "\"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\",", "return self._parse_response(request, resp, dry=dry) def 
getfirst(self, *args, **kw): try: return next(self.get(*args, **kw)) except", "{\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\":", "eq '{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None, thing=None): entity = None", "}, } class Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\":", "name=None): if name is not None: query = f\"name eq '{name}'\" yield from", "if not self._connection[\"base_url\"]: base_url = input(\"Please enter a base url for a SensorThings", "ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg,", "= f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if", "def get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None,", "\"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [ \"name\", \"description\",", "verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\")", "_schema = { \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\":", "or agreed to in writing, software # distributed under the License is distributed", "= location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is not None: query = f\"name", "= self.__class__.__name__ url = f\"{base_url}/{entity}\" if method == \"patch\": url = f\"{url}({self.iotid})\" else:", "= self._send_request(request, json=self._payload, dry=dry) return 
self._parse_response(request, resp, dry=dry) def getfirst(self, *args, **kw): try:", "class Observations(BaseST): _schema = { \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\":", "\"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"},", "\".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj)", "self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) def getfirst(self, *args, **kw): try: return", "\"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\":", "if resp.status_code == 200: return True def get( self, query, entity=None, pages=None, expand=None,", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "url = f\"{url}({self.iotid})\" else: params = [] if limit: params.append(f\"$top={limit}\") if orderby: if", "[ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self): name", "check_exists=True): if self._validate_payload(): if check_exists and self.exists(): return self.patch() else: request = self._generate_request(\"post\")", "dry=False): location = Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry) return location def", "page_count, yielded): if pages: if page_count >= pages: return if verbose: pv =", "License. 
# You may obtain a copy of the License at # #", "True else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if resp.status_code == 200: return", "\"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\":", "name is not None: query = f\"name eq '{name}'\" yield from ObservedProperties(None, self._session,", "location def get_sensors(self, query=None, name=None): if name is not None: query = f\"name", "if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session,", "{ \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"},", "eq '{name}'\") if resp: try: self._db_obj = resp except IndexError: return self.iotid =", "}, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def", "return next(self.get(*args, **kw)) except StopIteration: return def exists(self): name = self._payload[\"name\"] resp =", "click import yaml from requests import Session from jsonschema import validate, ValidationError import", "resp = self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]:", "= getattr(self._session, request[\"method\"]) if not dry: resp = func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw", "add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def add_observation(self,", "thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is not None: query = f\"name eq", "= payload self._connection = connection self._session = session def _validate_payload(self): try: 
validate(instance=self._payload, schema=self._schema)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "# Copyright 2021 ross # # Licensed under the Apache License, Version 2.0", "= Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self, iotid, payload, dry=False):", "else: request = self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp,", "try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True def", "\"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\":", "input(\"Please enter a base url for a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url", "with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session = Session() @property def base_url(self):", "class ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": {", "= thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is not None: query = f\"name", "= m.group(\"id\")[1:-1] self.iotid = iotid return True else: print(resp.status_code, resp.text) elif request[\"method\"] ==", "array of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An", "\"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, },", "License, Version 2.0 (the \"License\"); # you may not use this file except", "_generate_request( self, method, query=None, entity=None, orderby=None, expand=None, limit=None ): if orderby is None", "{\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema = { \"type\": 
\"object\", \"required\":", "location.iotid = iotid location.patch(dry) return location def get_sensors(self, query=None, name=None): if name is", "\"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\",", "{\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\":", "page={page_count + 1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") # verbose_message(request[\"url\"]) #", "not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity = self.__class__.__name__ url", "\"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\":", "\"resultTime\": {\"type\": \"string\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}},", "\"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema = {", "n = 100 nobs = len(obs) for i in range(0, nobs, n): print(\"loading", "entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if pages and pages < 0:", "fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid = None _db_obj = None", "\"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"},", "{\"type\": \"number\"}}, }, }, } def put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"]", "\"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array of two or", "\"description\": {\"type\": \"string\"}, 
\"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST):", "\"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema = { \"type\":", "the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\": \"An array", "== \"patch\": url = f\"{url}({self.iotid})\" else: params = [] if limit: params.append(f\"$top={limit}\") if", "dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def add_observation(self, payload, dry=False):", "name = self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj =", "verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request -----------------\") #", "pv = \"\" if pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv}", "self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name is not None: query = f\"name", "if m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid return True else: print(resp.status_code, resp.text)", "Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\":", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "if entity is None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if method ==", "location.put(dry) return location def put_thing(self, payload, dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry)", "eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = resp except IndexError: return self.iotid", "{ \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\":", "return 
True if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid", "print(f\"response={resp}\") return resp def _parse_response(self, request, resp, dry=False): if request[\"method\"] == \"get\": if", "\"description\": \"An array of four positions where the first equals the last\", \"allOf\":", "0) def put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists and self.exists(): return self.patch()", "import yaml from requests import Session from jsonschema import validate, ValidationError import re", "self, method, query=None, entity=None, orderby=None, expand=None, limit=None ): if orderby is None and", "except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema = {", "self._connection) obs.put(dry) return obs def put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session, self._connection)", "\"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\",", "ValidationError as err: print( f\"Validation failed for {self.__class__.__name__}. {err}. 
{self._payload}\" ) def _generate_request(", "{\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, }", "not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\":", "except StopIteration: return def exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\")", "{ \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\":", "self._connection = connection self._session = session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True", "self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request, 0, 0)", "self._session, self._connection) thing.put(dry) return thing def add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session,", "= [] if limit: params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\"", "\"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\":", "is not None: query = f\"name eq '{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query)", "or implied. 
# See the License for the specific language governing permissions and", "self._payload[\"observations\"] n = 100 nobs = len(obs) for i in range(0, nobs, n):", "verbose=True, **kw): connection = self._connection func = getattr(self._session, request[\"method\"]) if not dry: resp", "yield from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield from Locations(None,", "= iotid location.patch(dry) return location def get_sensors(self, query=None, name=None): if name is not", "Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def put_location(self, payload, dry=False): location = Locations(payload,", "{\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\",", "\"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, {", "resp.status_code == 200: return True def get( self, query, entity=None, pages=None, expand=None, limit=None,", "self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self,", "= None if location: if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location)", "\"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\":", "\"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, },", "dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def put_location(self, payload, dry=False):", "= f\"{base_url}/{entity}\" if method == \"patch\": url = f\"{url}({self.iotid})\" else: 
params = []", "limit=None, verbose=False, orderby=None, ): if pages and pages < 0: pages = abs(pages)", "\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST): _schema =", "not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request, resp, dry=False):", "= ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def add_observation(self, payload, dry=False): obs =", "try: return next(self.get(*args, **kw)) except StopIteration: return def exists(self): name = self._payload[\"name\"] resp", "payload self._connection = connection self._session = session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return", "1 yield v try: next_url = resp[\"@iot.nextLink\"] except KeyError: return yield from get_items(", "not resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no records found\") return else:", "= f\"name eq '{name}'\" yield from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None):", "orderby=None, expand=None, limit=None ): if orderby is None and method == \"get\": orderby", "[\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\": {", "thing.put(dry) return thing def add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry)", "except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if self._validate_payload():", "iotid location.patch(dry) return location def get_sensors(self, query=None, name=None): if name is not None:", "use this file except in compliance with the License. 
# You may obtain", "self._payload[\"components\"], \"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url =", "{\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema", "**kw) def get_locations(self, query=None, **kw): yield from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self,", "{ \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [", "\"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"}, \"Datastream\": {", "def _parse_response(self, request, resp, dry=False): if request[\"method\"] == \"get\": if resp.status_code == 200:", "payload = {} l = Locations(payload, None, None) l._validate_payload() # ============= EOF =============================================", "= self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no", "elif request[\"method\"] == \"post\": if dry: return True if resp.status_code == 201: m", "orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query}))", "resp.json() elif request[\"method\"] == \"post\": if dry: return True if resp.status_code == 201:", "= 100 nobs = len(obs) for i in range(0, nobs, n): print(\"loading chunk", "chunk = obs[i : i + n] pd = [ { \"Datastream\": self._payload[\"Datastream\"],", "None and method == \"get\": orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if", "resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if", "self._connection) location.iotid = iotid location.patch(dry) return location def get_sensors(self, 
query=None, name=None): if name", "for the specific language governing permissions and # limitations under the License. #", "and yielded >= limit: return yielded += 1 yield v try: next_url =", "dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema = { \"type\": \"object\", \"required\":", "{\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"},", "name is not None: query = f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def", "return obs def add_observation(self, payload, dry=False): obs = Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False)", "return location def get_sensors(self, query=None, name=None): if name is not None: query =", ") if __name__ == \"__main__\": payload = {} l = Locations(payload, None, None)", "\"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class", "self.iotid = self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\")", "thing = Things(payload, self._session, self._connection) thing.put(dry) return thing def add_observations(self, payload, dry=False): obs", "self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self,", "self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\")", "resp and resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self,", "{ \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\": 
[\"@iot.id\"],", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "pass def get_thing(self, query=None, name=None, location=None): entity = None if location: if isinstance(location,", "= \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\" ) # verbose_message(\"-------------- Request", "{\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\": {", "self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload, self._session,", "= f\"name eq '{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None, thing=None): entity", "= input(\"Please enter a base url for a SensorThings instance>> \") if base_url.endswith(\"/\"):", "\"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"},", "\"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\",", "= f\"name eq '{name}'\" try: return next(self.get_locations(query)) except StopIteration: pass def get_thing(self, query=None,", "\"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\": \"An array of linear", "+ n] pd = [ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, }", "], }, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\":", "put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def put_location(self,", "obs def add_observation(self, payload, dry=False): obs = Observations(payload, self._session, 
self._connection) obs.put(dry, check_exists=False) return", "def add_observations(self, payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request, resp) if not resp:", "not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as rfile:", "\"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\":", "def put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n = 100 nobs =", "self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry)", "array of four positions where the first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"},", "for a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url", "self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield from Things(None, self._session, self._connection).get(query, **kw)", "\"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request, 0, 0) def", "if name is not None: query = f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity))", "self._connection) datastream.put(dry) return datastream def put_location(self, payload, dry=False): location = Locations(payload, self._session, self._connection)", "wfile) self._session = Session() @property def base_url(self): return self._connection[\"base_url\"] def 
locations(self): loc =", "dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False):", "[\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\",", "with the License. # You may obtain a copy of the License at", "two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\":", "2021 ross # # Licensed under the Apache License, Version 2.0 (the \"License\");", "}, ], }, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": {", "= f\"{url}({self.iotid})\" else: params = [] if limit: params.append(f\"$top={limit}\") if orderby: if not", "orderby = \"$orderby=id desc\" def get_items(request, page_count, yielded): if pages: if page_count >=", "positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array of", "\"Sensor\", ], } def exists(self): name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid =", "__init__(self, payload, session, connection): self._payload = payload self._connection = connection self._session = session", "linear rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema", "law or agreed to in writing, software # distributed under the License is", "limitations under the License. 
# =============================================================================== import os.path import click import yaml from", "self._session = Session() @property def base_url(self): return self._connection[\"base_url\"] def locations(self): loc = Locations(None,", "query=None, name=None, thing=None): entity = None if thing: if isinstance(thing, dict): thing =", "def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid = None _db_obj = None def", "resp = func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if resp and", "range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk = obs[i : i +", "Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield from Things(None, self._session, self._connection).get(query,", "self._connection).get(query, **kw) def get_things(self, query=None, **kw): yield from Things(None, self._session, self._connection).get(query, **kw) def", "orderby=None, ): if pages and pages < 0: pages = abs(pages) orderby =", "obs.put(dry) return obs def put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry)", "if name is not None: query = f\"name eq '{name}'\" yield from ObservedProperties(None,", "== \"get\": if resp.status_code == 200: return resp.json() elif request[\"method\"] == \"post\": if", "nobs)) chunk = obs[i : i + n] pd = [ { \"Datastream\":", "'{name}'\" yield from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from Datastreams(None,", "entity=None, orderby=None, expand=None, limit=None ): if orderby is None and method == \"get\":", "Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\":", "if request[\"method\"] == \"get\": if resp.status_code == 200: return resp.json() elif request[\"method\"] 
==", "\"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\": \"string\"},", "\"definition\": {\"type\": \"string\"}, }, } class Datastreams(BaseST): _schema = { \"type\": \"object\", \"properties\":", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"}, },", "ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\":", "\"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, },", "def get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\"", "= Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry) return location def get_sensors(self, query=None,", "[\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\":", "self._session, self._connection) obs.put(dry) return obs def add_observation(self, payload, dry=False): obs = Observations(payload, self._session,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "{\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\":", "{\"method\": \"get\", \"url\": next_url}, page_count + 1, yielded ) start_request = self._generate_request( \"get\",", "= { \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": { 
\"name\": {\"type\": \"string\"}, \"description\":", "location: if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if name is", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "entity = None if location: if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity =", "# verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request, resp) if not resp: click.secho(request[\"url\"],", "\"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url", "json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection", "return obs def patch_location(self, iotid, payload, dry=False): location = Locations(payload, self._session, self._connection) location.iotid", "\"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema = {", "\"linearRing\": { \"description\": \"An array of four positions where the first equals the", "params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if", "entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw ) if", "the License. 
# =============================================================================== import os.path import click import yaml from requests import", "\"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, }", "dry=False, verbose=True, **kw): connection = self._connection func = getattr(self._session, request[\"method\"]) if not dry:", "}, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\":", "\"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\":", "get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield", "= \"$orderby=id desc\" def get_items(request, page_count, yielded): if pages: if page_count >= pages:", "self._connection[\"base_url\"]: base_url = input(\"Please enter a base url for a SensorThings instance>> \")", "\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"],", "\"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array of two or more positions\", \"allOf\":", "None def __init__(self, payload, session, connection): self._payload = payload self._connection = connection self._session", "yield v try: next_url = resp[\"@iot.nextLink\"] except KeyError: return yield from get_items( {\"method\":", "payload, session, connection): self._payload = payload self._connection = connection self._session = session def", "auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if resp and resp.status_code not in (200,", "of linear rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST):", "Session from jsonschema import 
validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg):", "iotid, payload, dry=False): location = Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry) return", "self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"):", "\"description\": \"An array of two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}],", "if pages and pages < 0: pages = abs(pages) orderby = \"$orderby=id desc\"", "[\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\":", "entity=entity)) def get_datastream(self, query=None, name=None, thing=None): entity = None if thing: if isinstance(thing,", "_schema = { \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\":", "\"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ {", "[\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\":", "}, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": { \"position\": { \"description\": \"A single", "if method == \"patch\": url = f\"{url}({self.iotid})\" else: params = [] if limit:", "expand=expand, limit=limit, ) yield from get_items(start_request, 0, 0) def put(self, dry=False, check_exists=True): if", "class Client: def __init__(self, base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\": user,", "entity = None if thing: if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity 
=", "+ 1, yielded ) start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit,", "= resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema", "self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema = { \"type\": \"object\", \"properties\": { \"name\":", "\"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\":", "sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False): obs", "f\"name eq '{name}'\" yield from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if", "} def exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp", "True def patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload,", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "'{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream =", "Things(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": { \"name\": {\"type\":", "{ \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\",", "resp def _parse_response(self, request, resp, dry=False): if request[\"method\"] == \"get\": if resp.status_code ==", "\"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, },", "request[\"method\"]) if not dry: resp = 
func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if", "*args, **kw): try: return next(self.get(*args, **kw)) except StopIteration: return def exists(self): name =", "{ \"description\": \"An array of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\":", "pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\" ) #", "\"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\":", "this file except in compliance with the License. # You may obtain a", "obs def put_datastream(self, payload, dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream", "\"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\":", "next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None, thing=None): entity = None if thing: if", "def locations(self): loc = Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload,", "i in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk = obs[i :", "re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid =", "_schema = { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"},", "\"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\"", "} class Sensors(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"],", "array of linear 
rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, }, }, } class", "next_url}, page_count + 1, yielded ) start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby,", "lid = location[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj", "}, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\":", "resp, dry=dry) def getfirst(self, *args, **kw): try: return next(self.get(*args, **kw)) except StopIteration: return", "dry: return True if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m:", "from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name is not None:", "= f\"Things({thing})/Datastreams\" if name is not None: query = f\"name eq '{name}'\" return", "ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\":", "orderby is None and method == \"get\": orderby = \"$orderby=id asc\" base_url =", "return if not resp[\"value\"]: warning(\"no records found\") return else: for v in resp[\"value\"]:", "payload, dry=False): datastream = Datastreams(payload, self._session, self._connection) datastream.put(dry) return datastream def put_location(self, payload,", "nobs = len(obs) for i in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs))", "self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\")", "if resp.status_code == 201: m = IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1]", "and method == \"get\": orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if 
not", "return {\"method\": method, \"url\": url} def _send_request(self, request, dry=False, verbose=True, **kw): connection =", "yield from ObservedProperties(None, self._session, self._connection).get(query) def get_datastreams(self, query=None, **kw): yield from Datastreams(None, self._session,", "self.iotid = self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema = { \"type\": \"object\", \"properties\":", "return True class Observations(BaseST): _schema = { \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\",", "request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if resp and resp.status_code not in", "= { \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\":", "if page_count >= pages: return if verbose: pv = \"\" if pages: pv", "\"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\",", "page_count + 1, yielded ) start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand,", "\"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]},", "import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\")", "click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid = None _db_obj =", "\"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\":", "self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please enter a base url for a", "\"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self): name = 
self._payload[\"name\"] thing = self._payload[\"Thing\"]", "not self._connection[\"base_url\"]: base_url = input(\"Please enter a base url for a SensorThings instance>>", "\"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array of two or more", "datastream def put_location(self, payload, dry=False): location = Locations(payload, self._session, self._connection) location.put(dry) return location", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "pages and pages < 0: pages = abs(pages) orderby = \"$orderby=id desc\" def", "self._db_obj[\"@iot.id\"] return True class Observations(BaseST): _schema = { \"type\": \"object\", \"required\": [\"phenomenonTime\", \"result\",", "_send_request(self, request, dry=False, verbose=True, **kw): connection = self._connection func = getattr(self._session, request[\"method\"]) if", "schema=self._schema) return True except ValidationError as err: print( f\"Validation failed for {self.__class__.__name__}. 
{err}.", "= self._connection func = getattr(self._session, request[\"method\"]) if not dry: resp = func( request[\"url\"],", "put(self, dry=False, check_exists=True): if self._validate_payload(): if check_exists and self.exists(): return self.patch() else: request", "base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None:", "limit: return yielded += 1 yield v try: next_url = resp[\"@iot.nextLink\"] except KeyError:", "def get_observed_properties(self, query=None, name=None): if name is not None: query = f\"name eq", "base_url=None, user=None, pwd=None): self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if not", "}, } class Sensors(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\",", "None, entity=entity, **kw ) if __name__ == \"__main__\": payload = {} l =", "required by applicable law or agreed to in writing, software # distributed under", "found\") return else: for v in resp[\"value\"]: if limit and yielded >= limit:", "method == \"get\": orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"):", "request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class", "== \"get\": orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url", "n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk = obs[i : i + n] pd", "self._session, self._connection).get(query) def get_observed_properties(self, query=None, name=None): if name is not None: query =", "= base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session", "True class Observations(BaseST): _schema = { \"type\": \"object\", \"required\": 
[\"phenomenonTime\", \"result\", \"resultTime\", \"Datastream\"],", "orderby = f\"$orderby={orderby}\" params.append(orderby) if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url", "_schema = { \"type\": \"object\", \"required\": [\"name\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"},", "= connection self._session = session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except", "self._send_request(request) resp = self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return if not", "\"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array of two", "= abs(pages) orderby = \"$orderby=id desc\" def get_items(request, page_count, yielded): if pages: if", "payload, dry=False): obs = Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self,", "True def get( self, query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ): if", "= self._db_obj[\"@iot.id\"] return True def patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp", "\"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\":", "desc\" def get_items(request, page_count, yielded): if pages: if page_count >= pages: return if", "next(self.get(*args, **kw)) except StopIteration: return def exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name", "yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url = input(\"Please enter a base url", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "}, \"ObservedProperty\": { \"type\": \"object\", 
\"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\":", "= IDREGEX.search(resp.headers.get(\"location\", \"\")) if m: iotid = m.group(\"id\")[1:-1] self.iotid = iotid return True", "query=None, name=None): if name is not None: query = f\"name eq '{name}'\" try:", "\"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\":", "= { \"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"},", "\"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"Sensor\": {", "Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry) return location def get_sensors(self, query=None, name=None):", "{\"$ref\": \"#/definitions/position\"}, }, }, { \"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\":", "params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return {\"method\":", "\"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\":", "None if location: if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity = \"Locations({})/Things\".format(location) if", "-----------------\") # verbose_message(request[\"url\"]) # verbose_message(\"----------------------------------------\") resp = self._send_request(request) resp = self._parse_response(request, resp) if", "\"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\":", "url for a SensorThings instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] =", "+= 1 yield v try: next_url = 
resp[\"@iot.nextLink\"] except KeyError: return yield from", "[\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def put(self, dry=False): if self._validate_payload():", "yielded >= limit: return yielded += 1 yield v try: next_url = resp[\"@iot.nextLink\"]", "}, \"lineString\": { \"description\": \"An array of two or more positions\", \"allOf\": [{\"$ref\":", "\"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"},", "payload, dry=False): thing = Things(payload, self._session, self._connection) thing.put(dry) return thing def add_observations(self, payload,", "return next(self.get_locations(query)) except StopIteration: pass def get_thing(self, query=None, name=None, location=None): entity = None", "} def put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n = 100 nobs", "{}/{}\".format(i, nobs)) chunk = obs[i : i + n] pd = [ {", "# you may not use this file except in compliance with the License.", "get_location(self, query=None, name=None): if name is not None: query = f\"name eq '{name}'\"", "params.append(f\"$top={limit}\") if orderby: if not orderby.startswith(\"$orderby\"): orderby = f\"$orderby={orderby}\" params.append(orderby) if query: #", "_schema = { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\":", "def add_observation(self, payload, dry=False): obs = Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs", "\"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\": { \"description\": \"An", "chunk {}/{}\".format(i, nobs)) chunk = obs[i : i + n] pd = [", "Things(payload, self._session, self._connection) thing.put(dry) return thing def add_observations(self, payload, dry=False): obs = 
ObservationsArray(payload,", "the first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": {", "\"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\":", "self.exists(): return self.patch() else: request = self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry)", "2}], }, \"linearRing\": { \"description\": \"An array of four positions where the first", "= { \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\":", "\"additionalItems\": False, }, \"positionArray\": { \"description\": \"An array of positions\", \"type\": \"array\", \"items\":", "self._connection = {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if not base_url: p =", "method, query=None, entity=None, orderby=None, expand=None, limit=None ): if orderby is None and method", "if params: url = f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return {\"method\": method,", "or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\": { \"description\": \"An", "not None: query = f\"name eq '{name}'\" try: return next(self.get_locations(query)) except StopIteration: pass", "payload, dry=False): obs = ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def add_observation(self, payload,", "[\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, \"ObservedProperty\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\":", "f\"name eq '{name}'\" try: return next(self.get_locations(query)) except StopIteration: pass def get_thing(self, query=None, name=None,", "IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, 
fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST:", "\"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\":", "Sensors(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": {", "License for the specific language governing permissions and # limitations under the License.", "ObservationsArray(payload, self._session, self._connection) obs.put(dry) return obs def add_observation(self, payload, dry=False): obs = Observations(payload,", "\"#/definitions/linearRing\"}, }, }, } class Sensors(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\",", "from requests import Session from jsonschema import validate, ValidationError import re IDREGEX =", "self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity =", "\"License\"); # you may not use this file except in compliance with the", "open(p, \"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if not self._connection[\"base_url\"]: base_url", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "}, } class ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\", \"components\"],", "is None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if method == \"patch\": url", "base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url} resp", "\"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": {", ") start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, 
expand=expand, limit=limit, ) yield from", "\"number\"}}, }, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ],", "\"definition\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"symbol\": {\"type\": \"string\"}, \"definition\": {\"type\": \"string\"}, },", "query = f\"name eq '{name}'\" yield from Sensors(None, self._session, self._connection).get(query) def get_observed_properties(self, query=None,", "Copyright 2021 ross # # Licensed under the Apache License, Version 2.0 (the", "self._session, self._connection) location.iotid = iotid location.patch(dry) return location def get_sensors(self, query=None, name=None): if", "= self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) def", "self._parse_response(request, resp, dry=dry) def getfirst(self, *args, **kw): try: return next(self.get(*args, **kw)) except StopIteration:", "[\"name\", \"description\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"Locations\": { \"type\":", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "= datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None, self._session, self._connection).get( None, entity=entity, **kw", "], } def exists(self): name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"]", "print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request, resp, dry=False): if request[\"method\"] == \"get\":", "[\"observations\", \"Datastream\", \"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, 
\"Datastream\": {", "= f\"name eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream,", "lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj", "def exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid = location[\"@iot.id\"] resp =", "resp: try: self._db_obj = resp except IndexError: return self.iotid = self._db_obj[\"@iot.id\"] return True", "base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as rfile: obj", "\"__main__\": payload = {} l = Locations(payload, None, None) l._validate_payload() # ============= EOF", "Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs def patch_location(self, iotid, payload, dry=False): location", "self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp except IndexError: return", "{\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, },", "{\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\":", "base_url[:-1] self._connection[\"base_url\"] = base_url with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session =", "= session def _validate_payload(self): try: validate(instance=self._payload, schema=self._schema) return True except ValidationError as err:", "\"result\", \"resultTime\", \"Datastream\"], \"properties\": { \"phenomenonTime\": {\"type\": \"string\"}, \"result\": {\"type\": \"number\"}, \"resultTime\": {\"type\":", "from get_items( {\"method\": \"get\", \"url\": next_url}, page_count + 1, yielded ) start_request =", "def put_location(self, payload, 
dry=False): location = Locations(payload, self._session, self._connection) location.put(dry) return location def", "{ \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": {", "put(self, dry=False): if self._validate_payload(): obs = self._payload[\"observations\"] n = 100 nobs = len(obs)", "datastream, **kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from", "\"#/definitions/polygon\"}, }, }, ], }, }, \"required\": [\"name\", \"description\", \"encodingType\", \"location\"], \"definitions\": {", "= self.getfirst(f\"name eq '{name}'\", entity=f\"Locations({lid})/Things\") if resp: try: self._db_obj = resp except IndexError:", "return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"]", "language governing permissions and # limitations under the License. # =============================================================================== import os.path", "try: return next(self.get_locations(query)) except StopIteration: pass def get_thing(self, query=None, name=None, location=None): entity =", "request = self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry)", "return yield from get_items( {\"method\": \"get\", \"url\": next_url}, page_count + 1, yielded )", "{ \"description\": \"An array of linear rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"}, },", "}, }, } class ObservationsArray(BaseST): _schema = { \"type\": \"object\", \"required\": [\"observations\", \"Datastream\",", "{self.__class__.__name__}. {err}. 
{self._payload}\" ) def _generate_request( self, method, query=None, entity=None, orderby=None, expand=None, limit=None", "{\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\":", "return True def get( self, query, entity=None, pages=None, expand=None, limit=None, verbose=False, orderby=None, ):", "{ \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } class", "resp[\"@iot.nextLink\"] except KeyError: return yield from get_items( {\"method\": \"get\", \"url\": next_url}, page_count +", "location=None): entity = None if location: if isinstance(location, dict): location = location[\"@<EMAIL>\"] entity", "_parse_response(self, request, resp, dry=False): if request[\"method\"] == \"get\": if resp.status_code == 200: return", "resp = self._send_request(request) resp = self._parse_response(request, resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return", "len(obs) for i in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk =", "= {\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"),", "{\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": { \"name\":", "None: query = f\"name eq '{name}'\" try: return next(self.get_locations(query)) except StopIteration: pass def", "\"string\"}, }, } class ObservedProperties(BaseST): _schema = { \"type\": \"object\", \"required\": [\"name\", \"definition\",", "= None def __init__(self, payload, session, connection): self._payload = payload self._connection = connection", "\"definition\": {\"type\": \"string\"}, }, }, \"observationType\": {\"type\": \"string\"}, \"Thing\": { \"type\": \"object\", \"required\":", "# params.append(urlencode({\"$filter\": query})) 
params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if expand: url =", "\"components\"], \"properties\": { \"observations\": {\"type\": \"array\"}, \"components\": {\"type\": \"array\"}, \"Datastream\": { \"type\": \"object\",", "2.0 (the \"License\"); # you may not use this file except in compliance", "SensorThings instance>> \") if base_url.endswith(\"/\"): base_url = base_url[:-1] self._connection[\"base_url\"] = base_url with open(p,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "add_observation(self, payload, dry=False): obs = Observations(payload, self._session, self._connection) obs.put(dry, check_exists=False) return obs def", "\"description\": \"An array of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": {", "iotid = None _db_obj = None def __init__(self, payload, session, connection): self._payload =", "patch_location(self, iotid, payload, dry=False): location = Locations(payload, self._session, self._connection) location.iotid = iotid location.patch(dry)", "\"polygon\": { \"description\": \"An array of linear rings\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/linearRing\"},", "# # Unless required by applicable law or agreed to in writing, software", "{\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"],", "express or implied. 
# See the License for the specific language governing permissions", "return def exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\") if resp:", "resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request, resp,", "exists(self): name = self._payload[\"name\"] resp = self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj", "{ \"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"},", "\"type\": \"object\", \"required\": [\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\":", "request, dry=False, verbose=True, **kw): connection = self._connection func = getattr(self._session, request[\"method\"]) if not", "if os.path.isfile(p): with open(p, \"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader) self._connection.update(**obj) if", "either express or implied. 
# See the License for the specific language governing", "of four positions where the first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\":", "= None _db_obj = None def __init__(self, payload, session, connection): self._payload = payload", "\"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema = { \"type\": \"object\",", "json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema = { \"type\": \"object\",", "single position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\": False, }, \"positionArray\":", "jsonschema import validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\")", "[\"type\", \"coordinates\"], \"oneOf\": [ { \"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\":", "if query: # params.append(urlencode({\"$filter\": query})) params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if expand:", "\"positionArray\": { \"description\": \"An array of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, },", "return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry)", "query = f\"name eq '{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None, thing=None):", "\"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], } def exists(self): name =", "the License. 
# You may obtain a copy of the License at #", "None if thing: if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if", "\"An array of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\":", "entity = f\"Things({thing})/Datastreams\" if name is not None: query = f\"name eq '{name}'\"", "print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if resp.status_code == 200: return True def", "\"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\": \"string\"}, \"location\": { \"type\": \"object\",", "[ { \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"]", "[ { \"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\":", "\"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], },", "Datastreams(None, self._session, self._connection).get(query, **kw) def get_locations(self, query=None, **kw): yield from Locations(None, self._session, self._connection).get(query,", "dry=False): obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def put_datastream(self, payload, dry=False):", "exists(self): name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name", "get_datastream(self, query=None, name=None, thing=None): entity = None if thing: if isinstance(thing, dict): thing", "pv = \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} - url={request['url']}\" ) # verbose_message(\"--------------", "def get_items(request, page_count, yielded): if pages: if page_count >= pages: return if verbose:", "if not resp[\"value\"]: warning(\"no records 
found\") return else: for v in resp[\"value\"]: if", "\"get\": orderby = \"$orderby=id asc\" base_url = self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url =", "get_sensors(self, query=None, name=None): if name is not None: query = f\"name eq '{name}'\"", "= func( request[\"url\"], auth=(connection[\"user\"], connection[\"pwd\"]), **kw ) if verbose: if resp and resp.status_code", "{\"@iot.id\": {\"type\": \"number\"}}, }, }, } def exists(self): name = self._payload[\"name\"] location =", "verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid = None _db_obj", "dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is not None: query", "resp) if not resp: click.secho(request[\"url\"], fg=\"red\") return if not resp[\"value\"]: warning(\"no records found\")", "def patch(self, dry=False): if self._validate_payload(): request = self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry)", "{ \"type\": \"object\", \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\":", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "[{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], }, \"polygon\": { \"description\": \"An array of linear rings\",", "governing permissions and # limitations under the License. # =============================================================================== import os.path import", "= resp[\"@iot.nextLink\"] except KeyError: return yield from get_items( {\"method\": \"get\", \"url\": next_url}, page_count", "and # limitations under the License. 
# =============================================================================== import os.path import click import", "{ \"title\": \"Point\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Point\"]}, \"coordinates\": {\"$ref\": \"#/definitions/position\"},", "validate, ValidationError import re IDREGEX = re.compile(r\"(?P<id>\\(\\d+\\))\") def verbose_message(msg): click.secho(msg, fg=\"green\") def warning(msg):", "thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\" if name is not None: query =", "self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj = resp except IndexError: return self.iotid", "{ \"description\": \"A single position\", \"type\": \"array\", \"minItems\": 2, \"items\": {\"type\": \"number\"}, \"additionalItems\":", "= base_url with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session = Session() @property", "and pages < 0: pages = abs(pages) orderby = \"$orderby=id desc\" def get_items(request,", "is not None: query = f\"name eq '{name}'\" try: return next(self.get_locations(query)) except StopIteration:", "\"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\": {", "[\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST): _schema = {", "warning(msg): click.secho(msg, fg=\"red\") class BaseST: iotid = None _db_obj = None def __init__(self,", "\"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\": {\"type\":", "nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk = obs[i : i + n]", "name=None, thing=None): entity = None if thing: if isinstance(thing, dict): thing = thing[\"@<EMAIL>\"]", "= Locations(None, self._session, self._connection) return loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): 
sensor =", "url = f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return {\"method\": method, \"url\": url}", "= {\"method\": \"post\", \"url\": url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry)", "= self._db_obj[\"@iot.id\"] return True class Locations(BaseST): _schema = { \"type\": \"object\", \"properties\": {", "{\"type\": \"string\"}, \"location\": { \"type\": \"object\", \"required\": [\"type\", \"coordinates\"], \"oneOf\": [ { \"title\":", "def patch_location(self, iotid, payload, dry=False): location = Locations(payload, self._session, self._connection) location.iotid = iotid", "yielded ) start_request = self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield", "if resp.status_code == 200: return resp.json() elif request[\"method\"] == \"post\": if dry: return", "\"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def exists(self):", "= self.getfirst(f\"name eq '{name}'\") if resp: try: self._db_obj = resp except IndexError: return", "\"string\"}, \"description\": {\"type\": \"string\"}, \"unitOfMeasurement\": { \"type\": \"object\", \"required\": [\"name\", \"symbol\", \"definition\"], \"properties\":", "else: print(resp.status_code, resp.text) elif request[\"method\"] == \"patch\": if resp.status_code == 200: return True", "{ \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"}, }, }, ], }, }, \"required\":", "base_url with open(p, \"w\") as wfile: yaml.dump(self._connection, wfile) self._session = Session() @property def", "positions where the first equals the last\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 4}], },", "and self.exists(): return self.patch() else: request = self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload,", "_schema = { 
\"type\": \"object\", \"required\": [\"name\", \"definition\", \"description\"], \"properties\": { \"name\": {\"type\":", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "query})) params.append(f\"$filter={query}\") if params: url = f\"{url}?{'&'.join(params)}\" if expand: url = f\"{url}&$expand={expand}\" return", "{\"type\": \"string\"}, \"metadata\": {\"type\": \"string\"}, }, } class ObservedProperties(BaseST): _schema = { \"type\":", "= self.getfirst(f\"name eq '{name}'\", entity=f\"Things({lid})/Datastreams\") if resp: try: self._db_obj = resp except IndexError:", "= Sensors(payload, self._session, self._connection) sensor.put(dry) return sensor def put_observed_property(self, payload, dry=False): obs =", "of positions\", \"type\": \"array\", \"items\": {\"$ref\": \"#/definitions/position\"}, }, \"lineString\": { \"description\": \"An array", "user, \"pwd\": <PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with", "permissions and # limitations under the License. 
# =============================================================================== import os.path import click", "and resp.status_code not in (200, 201): print(f\"request={request}\") print(f\"response={resp}\") return resp def _parse_response(self, request,", "= self._connection[\"base_url\"] if not base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request =", "in range(0, nobs, n): print(\"loading chunk {}/{}\".format(i, nobs)) chunk = obs[i : i", "os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\") if os.path.isfile(p): with open(p, \"r\") as rfile: obj = yaml.load(rfile, Loader=yaml.SafeLoader)", "base_url = f\"https://{base_url}/FROST-Server/v1.1\" if entity is None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\"", "ross # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "{ \"description\": \"An array of four positions where the first equals the last\",", "None: query = f\"name eq '{name}'\" return next(self.get_things(query, entity=entity)) def get_datastream(self, query=None, name=None,", "get_items(request, page_count, yielded): if pages: if page_count >= pages: return if verbose: pv", "= self._generate_request( \"get\", query=query, entity=entity, orderby=orderby, expand=expand, limit=limit, ) yield from get_items(start_request, 0,", "except in compliance with the License. 
# You may obtain a copy of", "**kw): if isinstance(datastream, dict): datastream = datastream[\"@<EMAIL>\"] entity = f\"Datastreams({datastream})/Observations\" yield from Datastreams(None,", "\"post\", \"url\": url} resp = self._send_request(request, json=pd, dry=dry) self._parse_response(request, resp, dry=dry) class Client:", "name = self._payload[\"name\"] thing = self._payload[\"Thing\"] lid = thing[\"@iot.id\"] resp = self.getfirst(f\"name eq", "{\"base_url\": base_url, \"user\": user, \"pwd\": <PASSWORD>} if not base_url: p = os.path.join(os.path.expanduser(\"~\"), \".sta.yaml\")", "return else: for v in resp[\"value\"]: if limit and yielded >= limit: return", "= \"\" if pages: pv = \"/{pages}\" verbose_message( f\"getting page={page_count + 1}{pv} -", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "{ \"type\": \"array\", \"required\": [\"@iot.id\"], \"properties\": {\"@iot.id\": {\"type\": \"number\"}}, }, }, } def", "loc.get(None, verbose=True) def put_sensor(self, payload, dry=False): sensor = Sensors(payload, self._session, self._connection) sensor.put(dry) return", "BaseST: iotid = None _db_obj = None def __init__(self, payload, session, connection): self._payload", "base_url.startswith(\"http\"): base_url = f\"https://{base_url}/FROST-Server/v1.1\" url = f\"{base_url}/CreateObservations\" request = {\"method\": \"post\", \"url\": url}", "eq '{name}'\" return next(self.get_datastreams(query, entity=entity)) def get_observations(self, datastream, **kw): if isinstance(datastream, dict): datastream", "def get_datastream(self, query=None, name=None, thing=None): entity = None if thing: if isinstance(thing, dict):", "{ \"Datastream\": self._payload[\"Datastream\"], \"components\": self._payload[\"components\"], \"dataArray\": chunk, } ] base_url = self._connection[\"base_url\"] if", "{ \"type\": \"object\", \"required\": [\"@iot.id\"], \"properties\": 
{\"@iot.id\": {\"type\": \"number\"}}, }, }, } def", "else: for v in resp[\"value\"]: if limit and yielded >= limit: return yielded", "{ \"title\": \"Polygon\", \"type\": \"object\", \"properties\": { \"type\": {\"enum\": [\"Polygon\"]}, \"coordinates\": {\"$ref\": \"#/definitions/polygon\"},", "self._generate_request(\"post\") print(request) resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) def getfirst(self,", "get_locations(self, query=None, **kw): yield from Locations(None, self._session, self._connection).get(query, **kw) def get_things(self, query=None, **kw):", "[\"name\", \"description\", \"encodingType\", \"metadata\"], \"properties\": { \"name\": {\"type\": \"string\"}, \"description\": {\"type\": \"string\"}, \"encodingType\":", "}, }, } def exists(self): name = self._payload[\"name\"] location = self._payload[\"Locations\"][0] lid =", "self._generate_request(\"patch\") resp = self._send_request(request, json=self._payload, dry=dry) return self._parse_response(request, resp, dry=dry) class Things(BaseST): _schema", "{\"@iot.id\": {\"type\": \"number\"}}, }, }, } class ObservationsArray(BaseST): _schema = { \"type\": \"object\",", "def put_observed_property(self, payload, dry=False): obs = ObservedProperties(payload, self._session, self._connection) obs.put(dry) return obs def", "array of two or more positions\", \"allOf\": [{\"$ref\": \"#/definitions/positionArray\"}, {\"minItems\": 2}], }, \"linearRing\":", "== \"patch\": if resp.status_code == 200: return True def get( self, query, entity=None,", "None: entity = self.__class__.__name__ url = f\"{base_url}/{entity}\" if method == \"patch\": url =", "{self._payload}\" ) def _generate_request( self, method, query=None, entity=None, orderby=None, expand=None, limit=None ): if", "}, }, \"required\": [ \"name\", \"description\", \"unitOfMeasurement\", \"observationType\", \"Thing\", \"ObservedProperty\", \"Sensor\", ], }", "= None if thing: if 
isinstance(thing, dict): thing = thing[\"@<EMAIL>\"] entity = f\"Things({thing})/Datastreams\"", "Session() @property def base_url(self): return self._connection[\"base_url\"] def locations(self): loc = Locations(None, self._session, self._connection)", "= Things(payload, self._session, self._connection) thing.put(dry) return thing def add_observations(self, payload, dry=False): obs =" ]
[ "line in sys.stdin: row = line.strip().split(\" \") if len(row) == 10 File =", "\") if len(row) == 10 File = str(row[6]) t1 = File.strip().split(\".\") ind =", "if len(row) == 10 File = str(row[6]) t1 = File.strip().split(\".\") ind = len(t1)-1", "sys for line in sys.stdin: row = line.strip().split(\" \") if len(row) == 10", "= str(row[6]) t1 = File.strip().split(\".\") ind = len(t1)-1 t2 = str(t1[ind]) print t2", "10 File = str(row[6]) t1 = File.strip().split(\".\") ind = len(t1)-1 t2 = str(t1[ind])", "in sys.stdin: row = line.strip().split(\" \") if len(row) == 10 File = str(row[6])", "len(row) == 10 File = str(row[6]) t1 = File.strip().split(\".\") ind = len(t1)-1 t2", "File = str(row[6]) t1 = File.strip().split(\".\") ind = len(t1)-1 t2 = str(t1[ind]) print", "== 10 File = str(row[6]) t1 = File.strip().split(\".\") ind = len(t1)-1 t2 =", "import sys for line in sys.stdin: row = line.strip().split(\" \") if len(row) ==", "for line in sys.stdin: row = line.strip().split(\" \") if len(row) == 10 File", "sys.stdin: row = line.strip().split(\" \") if len(row) == 10 File = str(row[6]) t1", "line.strip().split(\" \") if len(row) == 10 File = str(row[6]) t1 = File.strip().split(\".\") ind", "row = line.strip().split(\" \") if len(row) == 10 File = str(row[6]) t1 =", "#!/usr/bin/python import sys for line in sys.stdin: row = line.strip().split(\" \") if len(row)", "= line.strip().split(\" \") if len(row) == 10 File = str(row[6]) t1 = File.strip().split(\".\")" ]
[ "ttest_ind, spearmanr import random lexical_idxs = [1, 2, 4] verb_list = ['loves', 'hates',", "rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate BERT reference model bert_embeds =", "RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb)", "Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign", "= np.random.choice(range(0, len(glove_list)), replace = False, size=200) if set(sample) in samples: continue samples.append(set(sample))", "BertTokenizer, BertModel import logging import matplotlib.pyplot as plt import sys import numpy as", "= [] rsa_verb_dist = [] rsa_rand_verb_dist = [] samples = [] # Generate", "Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Random Verb: {sign_test(np.array(rsa_rand_verb_dist)", "= RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist", "\"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") #", "lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb =", "print(f'Sign Test Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. 
Verb:", "as plt import sys import numpy as np sys.path.append(\"../..\") import RSA_utils.utils as RSA", "sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind, spearmanr import random lexical_idxs", "RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0])", "print(f'Sign Test Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Obj:", "embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]])", "glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats", "rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD:", "if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj", "'lifts', 'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list", "samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample] samp_verb = verb[sample]", "np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\")", "= RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove hypothesis models embed_dict = 
RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\",", "obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom,", "representational similarity while len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)), replace = False,", "{np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random", "Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Random Verb: {sign_test(np.array(rsa_rand_verb_dist) - np.array(rsa_verb_dist))[1]}')", "rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom,", "similarity while len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200)", "of representational similarity while len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)), replace =", "len(glove_list)), replace = False, size=200) if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds =", "len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200) if set(sample)", "samp_subj = subj[sample] samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom", "[] rsa_verb_dist = [] rsa_rand_verb_dist = [] samples = [] # Generate 100", "sys import numpy as np sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils as", "model bert_embeds = 
RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist =", "rsa_verb_dist = [] rsa_rand_verb_dist = [] samples = [] # Generate 100 samples", "print(\"glove embeds generated\") # Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT", "False, size=200) if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj =", "verb_list = ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if", "{np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs.", "= RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom =", "continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample] samp_verb =", "Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. 
Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign", "sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind, spearmanr import random lexical_idxs = [1,", "# Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove hypothesis", "np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate", "\"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]])", "replace = False, size=200) if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample]", "= rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom =", "rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj:", "= bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb =", "= RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom],", "Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj", "as np sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats", 
"RSA_utils.utils as RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise", "import random lexical_idxs = [1, 2, 4] verb_list = ['loves', 'hates', 'likes', 'smells',", "RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity", "size=200) if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample]", "import numpy as np sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils as utils", "# Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}')", "print(\"data processed\") # Generate Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs,", "# Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist", "bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run", "STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. 
Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj", "np.random.choice(range(0, len(glove_list)), replace = False, size=200) if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds", "import glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity from", "RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj =", "Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist =", "axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests", "print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb:", "STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}')", "import sys import numpy as np sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils", "verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj)", "[] rsa_rand_verb_dist = [] samples = [] # Generate 100 samples of representational", "= [1, 2, 4] verb_list = ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves',", "processed\") # Generate Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, 
lexical_idxs, verb_list)", "random.seed(9) # Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove", "models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb =", "= [] # Generate 100 samples of representational similarity while len(samples) < 100:", "import torch from pytorch_pretrained_bert import BertTokenizer, BertModel import logging import matplotlib.pyplot as plt", "bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample]", "in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample]", "generated\") # Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\")", "'moves', 'sees', 'lifts', 'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus", "matplotlib.pyplot as plt import sys import numpy as np sys.path.append(\"../..\") import RSA_utils.utils as", "- np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. 
Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb", "{np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}')", "utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind,", "= RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom],", "= subj[sample] samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom =", "rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA", "RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0])", "STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. Obj:", "= obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom =", "Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. 
Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign", "== \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\")", "rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) #", "while len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200) if", "'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if __name__ == \"__main__\": np.random.seed(seed=9)", "{np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign", "300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb", "['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if __name__ ==", "rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb)", "np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs.", "Generate Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj =", "print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. 
Obj: {sign_test(np.array(rsa_subj_dist) -", "pytorch_pretrained_bert import BertTokenizer, BertModel import logging import matplotlib.pyplot as plt import sys import", "< 100: sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200) if set(sample) in", "plt import sys import numpy as np sys.path.append(\"../..\") import RSA_utils.utils as RSA import", "= np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate", "np sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats import", "spearmanr import random lexical_idxs = [1, 2, 4] verb_list = ['loves', 'hates', 'likes',", "= [] rsa_rand_verb_dist = [] samples = [] # Generate 100 samples of", "[] # Generate 100 samples of representational similarity while len(samples) < 100: sample", "np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0)", "2, 4] verb_list = ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts',", "as utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import", "samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample] samp_verb", "np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. 
Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs.", "Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb:", "cosine_similarity from scipy.stats import ttest_ind, spearmanr import random lexical_idxs = [1, 2, 4]", "bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)}", "- np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb", "vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}')", "vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}')", "STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA", "sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test", "hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb", "Generate 100 samples of representational similarity while len(samples) < 100: sample = np.random.choice(range(0,", "subj[sample] samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds)", "lexical_idxs = [1, 2, 4] verb_list = ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes',", "Corpus glove_list, bert_list = 
RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove hypothesis models embed_dict", "obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj)", "BertModel import logging import matplotlib.pyplot as plt import sys import numpy as np", "from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind, spearmanr", "generated\") rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist = []", "Test Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist)", "{np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD:", "= np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate BERT reference model", "'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if __name__ == \"__main__\":", "RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0])", "verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\") #", "= False, size=200) if set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj", "glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove hypothesis models embed_dict =", "bert_embeds = 
RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist = []", "axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD:", "bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list,", "[] samples = [] # Generate 100 samples of representational similarity while len(samples)", "Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA", "Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD:", "statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind, spearmanr import", "from scipy.stats import ttest_ind, spearmanr import random lexical_idxs = [1, 2, 4] verb_list", "embeds generated\") rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist =", "verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom,", "{sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test", "vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. 
Random Verb: {sign_test(np.array(rsa_rand_verb_dist) -", "import cosine_similarity from scipy.stats import ttest_ind, spearmanr import random lexical_idxs = [1, 2,", "RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0])", "'sees', 'lifts', 'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list,", "Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) -", "__name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data", "print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist", "{np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. 
Obj: {sign_test(np.array(rsa_subj_dist)", "import sign_test from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind, spearmanr import random", "'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) #", "as RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test from sklearn.metrics.pairwise import", "100: sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200) if set(sample) in samples:", "embeds generated\") # Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds", "samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom", "verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1])", "import logging import matplotlib.pyplot as plt import sys import numpy as np sys.path.append(\"../..\")", "set(sample) in samples: continue samples.append(set(sample)) samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj =", "Test Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. 
Verb: {sign_test(np.array(rsa_subj_dist)", "= [] samples = [] # Generate 100 samples of representational similarity while", "= RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom],", "= np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds", "random lexical_idxs = [1, 2, 4] verb_list = ['loves', 'hates', 'likes', 'smells', 'touches',", "[1, 2, 4] verb_list = ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees',", "logging import matplotlib.pyplot as plt import sys import numpy as np sys.path.append(\"../..\") import", "print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Random", "<reponame>mlepori1/Picking_BERTs_Brain import torch from pytorch_pretrained_bert import BertTokenizer, BertModel import logging import matplotlib.pyplot as", "import BertTokenizer, BertModel import logging import matplotlib.pyplot as plt import sys import numpy", "4] verb_list = ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits']", "torch from pytorch_pretrained_bert import BertTokenizer, BertModel import logging import matplotlib.pyplot as plt import", "= RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj", "subj = np.array(embed_dict[lexical_idxs[0]]) verb = np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove", "np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds 
generated\") # Generate BERT reference model bert_embeds", "Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj = np.array(embed_dict[lexical_idxs[0]])", "= np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list,", "100 samples of representational similarity while len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)),", "samp_bert_embeds = bert_embeds[sample] samp_subj = subj[sample] samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb", "= verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom =", "Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Random Verb:", "rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist = [] samples", "bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom", "import ttest_ind, spearmanr import random lexical_idxs = [1, 2, 4] verb_list = ['loves',", "subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom = RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom,", "samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom = RSA.calculate_geometry(samp_subj) obj_geom = RSA.calculate_geometry(samp_obj) verb_geom", "'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9)", "Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt') print(\"data 
processed\") # Generate Glove hypothesis models", "rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist = [] samples = [] #", "sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200) if set(sample) in samples: continue", "Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}')", "import matplotlib.pyplot as plt import sys import numpy as np sys.path.append(\"../..\") import RSA_utils.utils", "import RSA_utils.utils as RSA import glove_utils.utils as utils from statsmodels.stats.descriptivestats import sign_test from", "axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)}", "obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate BERT reference", "bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj:", "axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA", "{np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. 
Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs.", "if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt')", "reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist", "= RSA.calculate_geometry(samp_verb) rand_verb_geom = RSA.calculate_geometry(samp_rand_verb) rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0]) rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0]) rsa_verb_dist.append(spearmanr([verb_geom, bert_geom],", "print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}') print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)}", "RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist =", "'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess Corpus glove_list, bert_list =", "samples of representational similarity while len(samples) < 100: sample = np.random.choice(range(0, len(glove_list)), replace", "BERT reference model bert_embeds = RSA.get_bert_embeds(bert_list, 0) print(\"BERT embeds generated\") rsa_subj_dist = []", "{sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test Subj vs. 
Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test", "from sklearn.metrics.pairwise import cosine_similarity from scipy.stats import ttest_ind, spearmanr import random lexical_idxs =", "from pytorch_pretrained_bert import BertTokenizer, BertModel import logging import matplotlib.pyplot as plt import sys", "print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test", "# Generate 100 samples of representational similarity while len(samples) < 100: sample =", "Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}') print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) -", "= [] rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist = [] samples =", "rsa_rand_verb_dist = [] samples = [] # Generate 100 samples of representational similarity", "0) print(\"BERT embeds generated\") rsa_subj_dist = [] rsa_obj_dist = [] rsa_verb_dist = []", "[] rsa_obj_dist = [] rsa_verb_dist = [] rsa_rand_verb_dist = [] samples = []", "# Generate Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300, lexical_idxs, verb_list) subj", "RSA.preprocess_data('./head_trans_corpus.txt') print(\"data processed\") # Generate Glove hypothesis models embed_dict = RSA.get_glove_embeds(glove_list, \"../../glove_utils/glove/glove.6B.300d.txt\", 300,", "np.array(embed_dict[lexical_idxs[1]]) obj = np.array(embed_dict[lexical_idxs[2]]) rand_verb = np.array(embed_dict[-1]) print(\"glove embeds generated\") # Generate BERT", "Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}') print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}') print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)}", "samples = [] # Generate 100 samples of representational similarity while len(samples) <", "'pushes', 'moves', 'sees', 'lifts', 
'hits'] if __name__ == \"__main__\": np.random.seed(seed=9) random.seed(9) # Preprocess", "bert_geom], axis=1)[0]) rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0]) # Run Tests print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}')", "samp_obj = obj[sample] samp_verb = verb[sample] samp_rand_verb = rand_verb[sample] bert_geom = RSA.calculate_geometry(samp_bert_embeds) subj_geom", "{np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}') print(f'Sign Test Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}') print(f'Sign Test", "= ['loves', 'hates', 'likes', 'smells', 'touches', 'pushes', 'moves', 'sees', 'lifts', 'hits'] if __name__", "scipy.stats import ttest_ind, spearmanr import random lexical_idxs = [1, 2, 4] verb_list =", "numpy as np sys.path.append(\"../..\") import RSA_utils.utils as RSA import glove_utils.utils as utils from" ]
[ "config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint: disable=no-member config_instance.entry_point_map = pkg_resources.get_entry_map('productionsystem') return", "<filename>tests/conftest.py \"\"\"Define necessary setup fixtures.\"\"\" import pytest import pkg_resources from productionsystem.config import ConfigSystem", "autouse=True) def config(): \"\"\"Set up the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) #", "the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint: disable=no-member config_instance.entry_point_map = pkg_resources.get_entry_map('productionsystem')", "productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the config entrypoint map.\"\"\"", "import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the config entrypoint map.\"\"\" config_instance", "\"\"\"Set up the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint: disable=no-member config_instance.entry_point_map", "def config(): \"\"\"Set up the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint:", "ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the config entrypoint map.\"\"\" config_instance =", "config(): \"\"\"Set up the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint: disable=no-member", "\"\"\"Define necessary setup fixtures.\"\"\" import pytest import pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\",", "setup fixtures.\"\"\" import pytest import pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def", "from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the config entrypoint", "import 
pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the", "@pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None)", "pytest import pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up", "import pytest import pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set", "up the config entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint: disable=no-member config_instance.entry_point_map =", "necessary setup fixtures.\"\"\" import pytest import pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True)", "pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config(): \"\"\"Set up the config", "fixtures.\"\"\" import pytest import pkg_resources from productionsystem.config import ConfigSystem @pytest.fixture(scope=\"session\", autouse=True) def config():", "entrypoint map.\"\"\" config_instance = ConfigSystem.setup(None) # pylint: disable=no-member config_instance.entry_point_map = pkg_resources.get_entry_map('productionsystem') return config_instance" ]
[]
[ "= Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return", "url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token': self.access_token, } res =", "self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token = '' self.openid", "import requests from index.models import * class WeiXinLogin(): def __init__(self, code, state): self.code", "= 1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token:", "url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type':", "params = { 'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res", "self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save()", "self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res = requests.get(url, params=params).json() if", "= '' self.refresh_token = '' self.openid = '' self.is_expires = 1 self.detail =", "1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url", "self.is_expires) else: self.is_expires = 0 return True def get_info(self): # 4.拉取用户信息 user_info_url =", "res.get('openid')) for key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else", "if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0 return", "= Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return True def get_detail(self): self.token_expires()", "return True def 
token_expires(self): # 监测当前access_token是否超时? url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid':", "return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s", "= value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to", "'' self.is_expires = 1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token", "WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return True def get_detail(self): self.token_expires() if self.is_expires", "'grant_type': 'authorization_code' } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg')", "logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get", "self.is_expires = 1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if", "self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s", "# 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token': self.access_token, }", "widen import settings import requests from index.models import * class WeiXinLogin(): def __init__(self,", "{ 'appid': self.appid, 'access_token': self.access_token, } res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires", "'access_token': self.access_token, } res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s'", "True def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token,", "return True def get_detail(self): self.token_expires() if self.is_expires == 1: self.get_access_token() self.get_info() return self.detail", "self.detail.get('openid')) return True def get_detail(self): self.token_expires() if self.is_expires == 1: self.get_access_token() self.get_info() return", "params = { 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url,", "res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % (", "# coding=utf-8 import logging from widen import settings import requests from index.models import", "get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid,", "} res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token =", "} res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires)", "self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url, params=params).json() if res.get('errcode', 
None):", "return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret': self.appsecret, 'code':", "res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res)", "params = { 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url,", "'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') # decode", "self.access_token, } res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' %", "{ 'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res = requests.get(url,", "4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN',", "self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s", "logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info(", "= u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res", "= res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token,", "{ 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url, params=params).json() if", "res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = 
res.get(\"access_token\")", "self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') #", "requests from index.models import * class WeiXinLogin(): def __init__(self, code, state): self.code =", "= res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s'", "= res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True", "= { 'appid': self.appid, 'access_token': self.access_token, } res = requests.get(url, params=params).json() if res.get('errcode'):", "self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token = '' self.openid = '' self.is_expires", "# decode response content logging.info('Get user detail openid:' + res.get('openid')) for key, value", "if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token", "self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % (", "self.appid, 'access_token': self.access_token, } res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1", "if isinstance(value, str) else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' %", "# 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type':", "'refresh_token': self.refresh_token } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg')", "settings import requests from index.models import * class WeiXinLogin(): def __init__(self, code, 
state):", "def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid':", "% ( self.access_token, self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = {", "res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid,", "= { 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json()", "requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') # decode response content logging.info('Get user detail", "key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser", "= requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') # decode response content logging.info('Get user", "class WeiXinLogin(): def __init__(self, code, state): self.code = code self.state = state self.appid", "index.models import * class WeiXinLogin(): def __init__(self, code, state): self.code = code self.state", "self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0 return True def", "else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return True", "res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token))", "self.code, 'grant_type': 'authorization_code' } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return", "get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': 
self.access_token, 'openid': self.openid,", "value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser =", "logging.info('Save%s to db' % self.detail.get('openid')) return True def get_detail(self): self.token_expires() if self.is_expires ==", "return True def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token':", "value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return True def", "'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res = requests.get(url, params=params).json() if res.get('errcode',", "params=params).json() if res.get('errcode'): return res.get('errmsg') # decode response content logging.info('Get user detail openid:'", "= state self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token =", "Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return True def get_detail(self): self.token_expires() if", "'zh_CN', } res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') # decode response", "Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token))", "True def token_expires(self): # 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid,", "# 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params =", "= { 'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res =", "= res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' %", "= 0 return True def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params =", "'code': self.code, 'grant_type': 'authorization_code' } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg'))", "u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res =", "= 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0 return True def get_info(self):", "self.access_token, self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid,", "self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return", "self.refresh_token = '' self.openid = '' self.is_expires = 1 self.detail = {} #", "token_expires(self): # 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token': self.access_token,", "str) else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return", "'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url, params=params).json() if res.get('errcode',", "self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return", "} res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') # decode response content", "params = { 'appid': self.appid, 'access_token': self.access_token, } res = requests.get(url, params=params).json() if", "= '' self.is_expires = 1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): #", "0 return True def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = {", "Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True", "user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', }", "'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True def token_expires(self): #", "__init__(self, code, state): self.code = code self.state = state self.appid = settings.APP_ID self.appsecret", "import * class WeiXinLogin(): def __init__(self, code, state): self.code = code self.state =", "self.is_expires = 0 return True def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params", "self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type': self.refresh_token, 
'refresh_token': self.refresh_token", "( self.access_token, self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid':", "res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s", "code, state): self.code = code self.state = state self.appid = settings.APP_ID self.appsecret =", "params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0", "= u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code'", "= res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid,", "self.access_token, self.openid, self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth' params", "'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res = requests.get(url, params=params).json()", "'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token'", "params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\")", "Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True def", "logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True def token_expires(self):", "state self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token = ''", "db' % self.detail.get('openid')) return True def get_detail(self): self.token_expires() if self.is_expires == 1: self.get_access_token()", "logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True url =", "content logging.info('Get user detail openid:' + res.get('openid')) for key, value in res.items(): self.detail[key]", "openid:' + res.get('openid')) for key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value,", "user detail openid:' + res.get('openid')) for key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8')", "% ( self.access_token, self.openid, self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时? 
url =", "def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token, 'openid':", "res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True url", "= u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token': self.access_token, } res = requests.get(url,", "response content logging.info('Get user detail openid:' + res.get('openid')) for key, value in res.items():", "res.get('errcode'): return res.get('errmsg') # decode response content logging.info('Get user detail openid:' + res.get('openid'))", "from index.models import * class WeiXinLogin(): def __init__(self, code, state): self.code = code", "for key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value", "logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0 return True def get_info(self): # 4.拉取用户信息", "import settings import requests from index.models import * class WeiXinLogin(): def __init__(self, code,", "self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s'", "settings.APP_SECRET self.access_token = '' self.refresh_token = '' self.openid = '' self.is_expires = 1", "self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return", "= '' self.openid = '' self.is_expires = 1 self.detail = {} # 为了方便大家看,我都写在一个函数里", "= requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires", "= settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token = '' self.openid =", "= code self.state = state 
self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token =", "self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url =", "decode response content logging.info('Get user detail openid:' + res.get('openid')) for key, value in", "logging.info('Get user detail openid:' + res.get('openid')) for key, value in res.items(): self.detail[key] =", "value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db'", "res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0 return True", "res.get('errmsg') # decode response content logging.info('Get user detail openid:' + res.get('openid')) for key,", "u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token': self.access_token, } res = requests.get(url, params=params).json()", "% self.is_expires) else: self.is_expires = 0 return True def get_info(self): # 4.拉取用户信息 user_info_url", "'openid': self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg')", "if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token':", "self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' % ( self.access_token,", ";refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params =", "( self.access_token, self.openid, self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth'", "为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = {", "# 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo' params = { 'access_token': self.access_token, 'openid': self.openid, 'lang':", "* class WeiXinLogin(): def __init__(self, code, state): self.code = code self.state = state", "2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type': self.refresh_token,", ";openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时?", "res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser = Wxuser(**self.detail) WxUser.save()", "res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token =", "= res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info(", "res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s", "1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires = 0 return True def get_info(self): #", "True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret': self.appsecret, 'code': self.code,", "None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token')", "requests.get(url, params=params).json() if res.get('errcode', 
None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid =", "return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid = res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get =", "settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token = '' self.openid = ''", "self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret': self.appsecret,", "detail openid:' + res.get('openid')) for key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if", "coding=utf-8 import logging from widen import settings import requests from index.models import *", "self.state = state self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = '' self.refresh_token", "res.get(\"openid\") self.refresh_token = res.get('refresh_token') Token_get = Token(**res) Token_get.save() logging.info( 'access_token:%s ;openid:%s ;refresh_token:%s' %", "self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret':", "= {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token'", "{} # 为了方便大家看,我都写在一个函数里 def get_access_token(self): # 2.通过code换取网页授权access_token if self.refresh_token: url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params", "= u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res", "u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res =", "WeiXinLogin(): def __init__(self, code, state): self.code = code 
self.state = state self.appid =", "'appid': self.appid, 'access_token': self.access_token, } res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires =", "res = requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else:", "WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid')) return True def get_detail(self):", "def __init__(self, code, state): self.code = code self.state = state self.appid = settings.APP_ID", "= { 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url, params=params).json()", "else: self.is_expires = 0 return True def get_info(self): # 4.拉取用户信息 user_info_url = u'https://api.weixin.qq.com/sns/userinfo'", "state): self.code = code self.state = state self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET", "def token_expires(self): # 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token':", "'grant_type': self.refresh_token, 'refresh_token': self.refresh_token } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg'))", "self.access_token = '' self.refresh_token = '' self.openid = '' self.is_expires = 1 self.detail", "self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' } res = requests.get(url, params=params).json() if res.get('errcode', None):", "{ 'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json() if", "self.code = code self.state = state self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token", "return res.get('errmsg') # decode response content logging.info('Get user detail openid:' + res.get('openid')) for", "= requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token = res.get(\"access_token\") self.openid", "% self.detail.get('openid')) return True def get_detail(self): self.token_expires() if self.is_expires == 1: self.get_access_token() self.get_info()", "u'https://api.weixin.qq.com/sns/oauth2/access_token' params = { 'appid': self.appid, 'secret': self.appsecret, 'code': self.code, 'grant_type': 'authorization_code' }", "if res.get('errcode'): return res.get('errmsg') # decode response content logging.info('Get user detail openid:' +", "requests.get(url, params=params).json() if res.get('errcode'): self.is_expires = 1 logging.info('is_expires:%s' % self.is_expires) else: self.is_expires =", "'access_token': self.access_token, 'openid': self.openid, 'lang': 'zh_CN', } res = requests.get(user_info_url, params=params).json() if res.get('errcode'):", "res = requests.get(user_info_url, params=params).json() if res.get('errcode'): return res.get('errmsg') # decode response content 
logging.info('Get", "isinstance(value, str) else value WxUser = Wxuser(**self.detail) WxUser.save() logging.info('Save%s to db' % self.detail.get('openid'))", "self.openid = '' self.is_expires = 1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def get_access_token(self):", "code self.state = state self.appid = settings.APP_ID self.appsecret = settings.APP_SECRET self.access_token = ''", "import logging from widen import settings import requests from index.models import * class", ";openid:%s ;refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True url = u'https://api.weixin.qq.com/sns/oauth2/access_token' params", "'' self.openid = '' self.is_expires = 1 self.detail = {} # 为了方便大家看,我都写在一个函数里 def", "url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token' params = { 'appid': self.appid, 'grant_type': self.refresh_token, 'refresh_token': self.refresh_token }", "self.refresh_token } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token", "'' self.refresh_token = '' self.openid = '' self.is_expires = 1 self.detail = {}", "from widen import settings import requests from index.models import * class WeiXinLogin(): def", "= settings.APP_SECRET self.access_token = '' self.refresh_token = '' self.openid = '' self.is_expires =", "self.openid, self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时? 
url = u'https://api.weixin.qq.com/sns/auth' params =", "to db' % self.detail.get('openid')) return True def get_detail(self): self.token_expires() if self.is_expires == 1:", "'authorization_code' } res = requests.get(url, params=params).json() if res.get('errcode', None): logging.info(res.get('errmsg')) return res.get('errmsg') self.access_token", "logging from widen import settings import requests from index.models import * class WeiXinLogin():", ";refresh_token:%s' % ( self.access_token, self.openid, self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时? url", "self.refresh_token)) return True def token_expires(self): # 监测当前access_token是否超时? url = u'https://api.weixin.qq.com/sns/auth' params = {", "+ res.get('openid')) for key, value in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str)", "in res.items(): self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value WxUser = Wxuser(**self.detail)", "监测当前access_token是否超时? url = u'https://api.weixin.qq.com/sns/auth' params = { 'appid': self.appid, 'access_token': self.access_token, } res" ]
[ "nval = 6000 mdl2 = {\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl =", "-*- # # Copyright 2013-2019 European Commission (JRC); # Licensed under the EUPL", "pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works. def testOverlayOnInit(self): mdl", "% cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self):", "datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertEqual(l[0],", "= { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for (cls, l)", "not 0!\" % cls) for (cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s):", "= datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works.", "http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case import skip import pandas as pd", "Right-edge not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22,", "0!\" % cls) for (cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section", "in classes.keys() } for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" %", "test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge", "(the 'Licence'); # You may not use this work except in compliance with", "goodVehicle() exp = Experiment(mdl) mdl = 
exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot))", "cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\"", "sys import unittest from unittest.case import skip import pandas as pd from wltp", "# # Copyright 2013-2019 European Commission (JRC); # Licensed under the EUPL (the", "may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest", "in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"]", "Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls,", "class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" % cls) for (cls, l) in", "0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__ == \"__main__\": # import", "def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True)", "obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest from", "import wltp from wltp.experiment import Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase): def", "the EUPL (the 'Licence'); # You may not use this work except in", "= goodVehicle() nval = 6000 mdl2 = {\"n_rated\": nval} exp = Experiment(mdl, mdl2)", "European Commission (JRC); # Licensed under the EUPL (the 'Licence'); # You may", "EUPL (the 'Licence'); # You may not use this work except in compliance", "unittest.case import skip import pandas as pd from wltp import 
datamodel import wltp", "cls) for (cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not", "testOverlayOnInit(self): mdl = goodVehicle() nval = 6000 mdl2 = {\"n_rated\": nval} exp =", "wltp from wltp.experiment import Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self):", "from .goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp =", "l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes =", "under the EUPL (the 'Licence'); # You may not use this work except", "Right-edge not INF!\") if __name__ == \"__main__\": # import sys;sys.argv = ['', 'Test.testName']", "use this work except in compliance with the Licence. # You may obtain", "= Experiment(mdl) mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models", "compliance with the Licence. 
# You may obtain a copy of the Licence", "edges=True) for cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0,", "for cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s):", "<reponame>qinfeng2011/wltp #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2013-2019 European", "exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls,", "edges=True) for cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l),", "wltp import datamodel import wltp from wltp.experiment import Experiment from .goodvehicle import goodVehicle", "mdl2 = {\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval)", "mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = {", "cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge", "with the Licence. 
# You may obtain a copy of the Licence at:", "import Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle()", "a copy of the Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case", "pandas as pd from wltp import datamodel import wltp from wltp.experiment import Experiment", "from unittest.case import skip import pandas as pd from wltp import datamodel import", "as pd from wltp import datamodel import wltp from wltp.experiment import Experiment from", "the Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case import skip import", "defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel", "= 6000 mdl2 = {\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl = exp._model", "datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertSequenceEqual(l,", "{ cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for (cls, l) in", "python # -*- coding: utf-8 -*- # # Copyright 2013-2019 European Commission (JRC);", "import skip import pandas as pd from wltp import datamodel import wltp from", "classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys()", "sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = {", "{\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self):", "cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: 
datamodel.get_class_parts_limits(cls, edges=True) for", "nval} exp = Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes", "\"Class(%s): Left-edge not 0!\" % cls) for (cls, l) in class_limits.items(): self.assertEqual( l[-1],", "= goodVehicle() exp = Experiment(mdl) mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\",", "l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" % cls,", "not INF!\") if __name__ == \"__main__\": # import sys;sys.argv = ['', 'Test.testName'] unittest.main()", "(cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" % cls) for", "works. def testOverlayOnInit(self): mdl = goodVehicle() nval = 6000 mdl2 = {\"n_rated\": nval}", "datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if", "coding: utf-8 -*- # # Copyright 2013-2019 European Commission (JRC); # Licensed under", "Left-edge not 0!\" % cls) for (cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]),", "mdl = goodVehicle() nval = 6000 mdl2 = {\"n_rated\": nval} exp = Experiment(mdl,", "not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34])", ") @skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works. def testOverlayOnInit(self): mdl = goodVehicle()", "##TODO: Re-enable when pandel works. 
def testOverlayOnInit(self): mdl = goodVehicle() nval = 6000", "pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not", "from wltp.experiment import Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl", "Re-enable when pandel works. def testOverlayOnInit(self): mdl = goodVehicle() nval = 6000 mdl2", "classes.keys() } for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls)", "= {\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def", "self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" % cls) for (cls, l) in class_limits.items():", "# You may not use this work except in compliance with the Licence.", "Licence. # You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import", "-*- coding: utf-8 -*- # # Copyright 2013-2019 European Commission (JRC); # Licensed", "self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__", "import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl) mdl", "len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def", "} for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" %", "import pandas as pd from wltp import datamodel import wltp from wltp.experiment import", "Experiment(mdl) mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\")", "\"p_norm\"]]).equals(pd.DataFrame(defwot)) ) 
@skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works. def testOverlayOnInit(self): mdl =", "mdl = goodVehicle() exp = Experiment(mdl) mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue(", "Section Right-edge not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l,", "= Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"]", "= exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable", "0, \"Class(%s): Left-edge not 0!\" % cls) for (cls, l) in class_limits.items(): self.assertEqual(", "test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in", "\"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls:", "% cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True)", ") def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits =", "exp = Experiment(mdl) mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) )", "test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = 
datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0],", "'Licence'); # You may not use this work except in compliance with the", "Licensed under the EUPL (the 'Licence'); # You may not use this work", "exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable when", "2013-2019 European Commission (JRC); # Licensed under the EUPL (the 'Licence'); # You", "work except in compliance with the Licence. # You may obtain a copy", "You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import sys import", "Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case import skip import pandas", "mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits =", "import unittest from unittest.case import skip import pandas as pd from wltp import", "Commission (JRC); # Licensed under the EUPL (the 'Licence'); # You may not", "datamodel import wltp from wltp.experiment import Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase):", "not use this work except in compliance with the Licence. 
# You may", "class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" % cls, ) def", ".goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl)", "= datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\")", "Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp", "datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works. def", "not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__ == \"__main__\": #", "from wltp import datamodel import wltp from wltp.experiment import Experiment from .goodvehicle import", "nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for", "def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit:", "self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works. def testOverlayOnInit(self):", "disabled\") ##TODO: Re-enable when pandel works. def testOverlayOnInit(self): mdl = goodVehicle() nval =", "@skip(\"Cascade-models disabled\") ##TODO: Re-enable when pandel works. def testOverlayOnInit(self): mdl = goodVehicle() nval", "the Licence. 
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl", "cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for (cls, l) in class_limits.items():", "this work except in compliance with the Licence. # You may obtain a", "= datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() }", "l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" % cls) for (cls,", "may not use this work except in compliance with the Licence. # You", "34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"),", "test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in", "goodVehicle() nval = 6000 mdl2 = {\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl", "# Licensed under the EUPL (the 'Licence'); # You may not use this", "class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl) mdl = exp._model", "Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits", "class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits", "of the Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case import skip", "testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl) mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"]", "def testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl) mdl = exp._model 
defwot =", "6000 mdl2 = {\"n_rated\": nval} exp = Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"],", "#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2013-2019 European Commission", "(cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" %", "when pandel works. def testOverlayOnInit(self): mdl = goodVehicle() nval = 6000 mdl2 =", "l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l", "classes.keys() } for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\"", "len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l =", "for cls in classes.keys() } for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s):", "import datamodel import wltp from wltp.experiment import Experiment from .goodvehicle import goodVehicle class", "= datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge", "def testOverlayOnInit(self): mdl = goodVehicle() nval = 6000 mdl2 = {\"n_rated\": nval} exp", "except in compliance with the Licence. 
# You may obtain a copy of", "# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import sys", "unittest from unittest.case import skip import pandas as pd from wltp import datamodel", "in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" % cls, )", "self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self):", "cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits", "\"PMR-limit: Right-edge not INF!\") if __name__ == \"__main__\": # import sys;sys.argv = ['',", "wltp.experiment import Experiment from .goodvehicle import goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl =", "# Copyright 2013-2019 European Commission (JRC); # Licensed under the EUPL (the 'Licence');", "def test_get_class_parts_limits_with_edges(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls", "import sys import unittest from unittest.case import skip import pandas as pd from", "mdl = exp._model defwot = datamodel.upd_default_load_curve({})[\"wot\"] self.assertTrue( pd.DataFrame(mdl[\"wot\"][[\"n_norm\", \"p_norm\"]]).equals(pd.DataFrame(defwot)) ) @skip(\"Cascade-models disabled\") ##TODO:", "You may not use this work except in compliance with the Licence. 
#", "0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__ ==", "self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\")", "(JRC); # Licensed under the EUPL (the 'Licence'); # You may not use", "at: http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case import skip import pandas as", "= exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls:", "float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__ == \"__main__\": # import sys;sys.argv =", "skip import pandas as pd from wltp import datamodel import wltp from wltp.experiment", "[22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not 0!\") self.assertEqual(pmr_limits[-1],", "exp = Experiment(mdl, mdl2) mdl = exp._model self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes =", "pd from wltp import datamodel import wltp from wltp.experiment import Experiment from .goodvehicle", "copy of the Licence at: http://ec.europa.eu/idabc/eupl import sys import unittest from unittest.case import", "\"Left-edge not 0!\") self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__ == \"__main__\":", "self.assertEqual(pmr_limits[-1], float(\"inf\"), \"PMR-limit: Right-edge not INF!\") if __name__ == \"__main__\": # import sys;sys.argv", "for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" % cls)", "self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes = 
datamodel.get_wltc_data()[\"classes\"] class_limits =", "utf-8 -*- # # Copyright 2013-2019 European Commission (JRC); # Licensed under the", "% cls) for (cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge", "goodVehicle class Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl) mdl =", "in classes.keys() } for (cls, l) in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not", "self.assertEqual(mdl[\"n_rated\"], nval) def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True)", "datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for", "} for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def", "(cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self): classes", "pandel works. def testOverlayOnInit(self): mdl = goodVehicle() nval = 6000 mdl2 = {\"n_rated\":", "def test_get_class_parts_limits_sorted(self): classes = datamodel.get_wltc_data()[\"classes\"] class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls", "# -*- coding: utf-8 -*- # # Copyright 2013-2019 European Commission (JRC); #", "in class_limits.items(): self.assertEqual(l[0], 0, \"Class(%s): Left-edge not 0!\" % cls) for (cls, l)", "in compliance with the Licence. 
# You may obtain a copy of the", "Test(unittest.TestCase): def testGoodVehicle(self): mdl = goodVehicle() exp = Experiment(mdl) mdl = exp._model defwot", "for (cls, l) in class_limits.items(): self.assertEqual( l[-1], len(classes[cls][\"v_cycle\"]), \"Class(%s): Section Right-edge not len(cycle)!\"", "\"Class(%s): Section Right-edge not len(cycle)!\" % cls, ) def test_get_class_pmr_limits(self): l = datamodel.get_class_pmr_limits()", "l = datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0,", "datamodel.get_class_pmr_limits() self.assertSequenceEqual(l, [22, 34]) def test_get_class_pmr_limits_with_edges(self): pmr_limits = datamodel.get_class_pmr_limits(edges=True) self.assertEqual(pmr_limits[0], 0, \"Left-edge not", "class_limits = { cls: datamodel.get_class_parts_limits(cls, edges=True) for cls in classes.keys() } for (cls,", "for (cls, l) in class_limits.items(): self.assertSequenceEqual(l, sorted(l), \"Class(%s): Unsorted!\" % cls) def test_get_class_parts_limits_with_edges(self):", "Copyright 2013-2019 European Commission (JRC); # Licensed under the EUPL (the 'Licence'); #" ]
[ "top == '$': return exp, ops exp.push(ops.pop()) top = ops.get() l2 = level(top)", "elif t in '()': continue else: t2 = curr.pop() t1 = curr.pop() result", "def pop(self): return self._list.pop(0) if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex =", "top = ops.get() l2 = level(top) ops.push(c) print(ops) print(exp) print() exp, ops =", "u): self._list = self._list + [u] def get(self): return self._list[-1] if not self.isempty()", "exp, ops exp.push(ops.pop()) top = ops.get() l2 = level(top) ops.push(c) print(ops) print(exp) print()", "= ops.get() l2 = level(top) ops.push(c) print(ops) print(exp) print() exp, ops = parse(ex)", "elif c in '+-*$': top = ops.get() l1 = level(c) l2 = level(top)", "def __str__(self): return self._list.__str__() def __repr__(self): return self._list.__repr__() def isempty(self): return self._list.__len__() ==", "'*/': return 2 elif v in '()': return 0 raise Exception(v) def compare(v1,", "ops = Stack(\"$\") exp = Queue() for c in (ex + \"$\"): if", "in '1234567890': exp.push(c) elif c == '(': ops.push('(') elif c == ')': while", "= parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty(): t = exp.pop()", "== '$': return exp, ops exp.push(ops.pop()) top = ops.get() l2 = level(top) ops.push(c)", "isempty(self): return self._list.__len__() == 0 def push(self, u): pass def get(self): pass def", "作者(github账号):GoddessLuBoYan 内容:将四则运算的中缀表达式转换成后缀表达式并计算 要求:每个数字必须都是一位数,我没有考虑多位数和小数的情况 \"\"\" # 基本容器定义 class Container: def __init__(self, iterator=None): self._list =", "print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty():", "t in '()': continue else: t2 = curr.pop() t1 = curr.pop() result =", "self.isempty() else None def pop(self): return self._list.pop(0) if not self.isempty() else None #", "= ops.pop() if top == '(': break if top == '$': raise exp.push(top)", "continue else: t2 = curr.pop() t1 = curr.pop() result = eval(str(t1) + str(t)", "c == ')': while True: top = ops.pop() if top 
== '(': break", "push(self, u): self._list = self._list + [u] def get(self): return self._list[-1] if not", "要求:每个数字必须都是一位数,我没有考虑多位数和小数的情况 \"\"\" # 基本容器定义 class Container: def __init__(self, iterator=None): self._list = [] if", "self._list = [] if iterator: for i in iterator: self.push(i) def __len__(self): return", "i in iterator: self.push(i) def __len__(self): return self._list.__len__() def __str__(self): return self._list.__str__() def", "c == '(': ops.push('(') elif c == ')': while True: top = ops.pop()", "def __init__(self, iterator=None): self._list = [] if iterator: for i in iterator: self.push(i)", "= ops.get() l1 = level(c) l2 = level(top) while l1 <= l2: if", "= curr.pop() result = eval(str(t1) + str(t) + str(t2)) print(str(t1) + str(t) +", "self._list = [u] + self._list def get(self): return self._list[0] if not self.isempty() else", "top = ops.get() l1 = level(c) l2 = level(top) while l1 <= l2:", "eval(str(t1) + str(t) + str(t2)) print(str(t1) + str(t) + str(t2), '=', result) curr.push(result)", "l2: if c == '$' and top == '$': return exp, ops exp.push(ops.pop())", "+ str(t) + str(t2)) print(str(t1) + str(t) + str(t2), '=', result) curr.push(result) print(curr)", "iterator: for i in iterator: self.push(i) def __len__(self): return self._list.__len__() def __str__(self): return", "def pop(self): pass class Stack(Container): def push(self, u): self._list = [u] + self._list", "return self._list.pop(0) if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def", "'$': return exp, ops exp.push(ops.pop()) top = ops.get() l2 = level(top) ops.push(c) print(ops)", "return 0 elif v in '+-': return 1 elif v in '*/': return", "and top == '$': return exp, ops exp.push(ops.pop()) top = ops.get() l2 =", "0 def push(self, u): pass def get(self): pass def pop(self): pass class Stack(Container):", "t2 = curr.pop() t1 = curr.pop() result = eval(str(t1) + str(t) + str(t2))", "return self._list.__repr__() def isempty(self): return 
self._list.__len__() == 0 def push(self, u): pass def", "return exp, ops exp.push(ops.pop()) top = ops.get() l2 = level(top) ops.push(c) print(ops) print(exp)", "self.isempty() else None class Queue(Container): def push(self, u): self._list = self._list + [u]", "基本容器定义 class Container: def __init__(self, iterator=None): self._list = [] if iterator: for i", "return self._list.__str__() def __repr__(self): return self._list.__repr__() def isempty(self): return self._list.__len__() == 0 def", "print(ops) print(exp) print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while", "def get(self): pass def pop(self): pass class Stack(Container): def push(self, u): self._list =", "level(v1) - level(v2) def parse(ex): ops = Stack(\"$\") exp = Queue() for c", "level(v): if v in '$': return 0 elif v in '+-': return 1", "get(self): return self._list[-1] if not self.isempty() else None def pop(self): return self._list.pop(0) if", "self._list def get(self): return self._list[0] if not self.isempty() else None def pop(self): return", "else None class Queue(Container): def push(self, u): self._list = self._list + [u] def", "in '()': return 0 raise Exception(v) def compare(v1, v2): return level(v1) - level(v2)", "Exception(v) def compare(v1, v2): return level(v1) - level(v2) def parse(ex): ops = Stack(\"$\")", "'(': break if top == '$': raise exp.push(top) elif c in '+-*$': top", "pop(self): return self._list.pop(0) if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\"", "- level(v2) def parse(ex): ops = Stack(\"$\") exp = Queue() for c in", "if top == '$': raise exp.push(top) elif c in '+-*$': top = ops.get()", "elif c == '(': ops.push('(') elif c == ')': while True: top =", "c in '+-*$': top = ops.get() l1 = level(c) l2 = level(top) while", "t in \"123456789\": curr.push(t) elif t in '()': continue else: t2 = curr.pop()", "# 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$': return 0", "iterator: self.push(i) def 
__len__(self): return self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self): return", "class Stack(Container): def push(self, u): self._list = [u] + self._list def get(self): return", "exp.pop() if t in \"123456789\": curr.push(t) elif t in '()': continue else: t2", "Stack(\"$\") exp = Queue() for c in (ex + \"$\"): if c in", "+ \"$\"): if c in '1234567890': exp.push(c) elif c == '(': ops.push('(') elif", "c in (ex + \"$\"): if c in '1234567890': exp.push(c) elif c ==", "ops.push('(') elif c == ')': while True: top = ops.pop() if top ==", "def push(self, u): self._list = self._list + [u] def get(self): return self._list[-1] if", "not self.isempty() else None def pop(self): return self._list.pop(0) if not self.isempty() else None", "= \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$': return 0 elif v in", "= Stack(\"$\") exp = Queue() for c in (ex + \"$\"): if c", "c == '$' and top == '$': return exp, ops exp.push(ops.pop()) top =", "class Queue(Container): def push(self, u): self._list = self._list + [u] def get(self): return", "= level(c) l2 = level(top) while l1 <= l2: if c == '$'", "t = exp.pop() if t in \"123456789\": curr.push(t) elif t in '()': continue", "if not self.isempty() else None def pop(self): return self._list.pop(0) if not self.isempty() else", "print(exp) print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not", "')': while True: top = ops.pop() if top == '(': break if top", "exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty(): t", "pass class Stack(Container): def push(self, u): self._list = [u] + self._list def get(self):", "+ [u] def get(self): return self._list[-1] if not self.isempty() else None def pop(self):", "== '(': ops.push('(') elif c == ')': while True: top = ops.pop() if", "exp.isempty(): t = exp.pop() if t in \"123456789\": curr.push(t) elif t in '()':", "'()': continue else: t2 = curr.pop() t1 = curr.pop() result = eval(str(t1) +", "0 elif 
v in '+-': return 1 elif v in '*/': return 2", "\"$\"): if c in '1234567890': exp.push(c) elif c == '(': ops.push('(') elif c", "计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty(): t = exp.pop() if t in", "None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$': return", "pop(self): pass class Stack(Container): def push(self, u): self._list = [u] + self._list def", "\"\"\" 作者(github账号):GoddessLuBoYan 内容:将四则运算的中缀表达式转换成后缀表达式并计算 要求:每个数字必须都是一位数,我没有考虑多位数和小数的情况 \"\"\" # 基本容器定义 class Container: def __init__(self, iterator=None): self._list", "def push(self, u): self._list = [u] + self._list def get(self): return self._list[0] if", "'()': return 0 raise Exception(v) def compare(v1, v2): return level(v1) - level(v2) def", "[u] def get(self): return self._list[-1] if not self.isempty() else None def pop(self): return", "def compare(v1, v2): return level(v1) - level(v2) def parse(ex): ops = Stack(\"$\") exp", "raise exp.push(top) elif c in '+-*$': top = ops.get() l1 = level(c) l2", "else None def pop(self): return self._list.pop(0) if not self.isempty() else None class Queue(Container):", "= self._list + [u] def get(self): return self._list[-1] if not self.isempty() else None", "self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in", "not self.isempty() else None class Queue(Container): def push(self, u): self._list = self._list +", "Container: def __init__(self, iterator=None): self._list = [] if iterator: for i in iterator:", "= Queue() for c in (ex + \"$\"): if c in '1234567890': exp.push(c)", "while not exp.isempty(): t = exp.pop() if t in \"123456789\": curr.push(t) elif t", "top = ops.pop() if top == '(': break if top == '$': raise", "top == '(': break if top == '$': raise exp.push(top) elif c in", "while True: top = ops.pop() if top == '(': break if top ==", "self._list.__len__() == 0 def push(self, u): pass def get(self): pass def pop(self): pass", "get(self): pass def pop(self): pass 
class Stack(Container): def push(self, u): self._list = [u]", "in '$': return 0 elif v in '+-': return 1 elif v in", "def __repr__(self): return self._list.__repr__() def isempty(self): return self._list.__len__() == 0 def push(self, u):", "v in '()': return 0 raise Exception(v) def compare(v1, v2): return level(v1) -", "compare(v1, v2): return level(v1) - level(v2) def parse(ex): ops = Stack(\"$\") exp =", "if c == '$' and top == '$': return exp, ops exp.push(ops.pop()) top", "None class Queue(Container): def push(self, u): self._list = self._list + [u] def get(self):", "'+-*$': top = ops.get() l1 = level(c) l2 = level(top) while l1 <=", "l1 = level(c) l2 = level(top) while l1 <= l2: if c ==", "0 raise Exception(v) def compare(v1, v2): return level(v1) - level(v2) def parse(ex): ops", "Queue() for c in (ex + \"$\"): if c in '1234567890': exp.push(c) elif", "if not self.isempty() else None class Queue(Container): def push(self, u): self._list = self._list", "exp.push(ops.pop()) top = ops.get() l2 = level(top) ops.push(c) print(ops) print(exp) print() exp, ops", "pass def pop(self): pass class Stack(Container): def push(self, u): self._list = [u] +", "self._list[0] if not self.isempty() else None def pop(self): return self._list.pop(0) if not self.isempty()", "self._list.pop(0) if not self.isempty() else None class Queue(Container): def push(self, u): self._list =", "def isempty(self): return self._list.__len__() == 0 def push(self, u): pass def get(self): pass", "ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$': return 0 elif v", "pass def get(self): pass def pop(self): pass class Stack(Container): def push(self, u): self._list", "v in '*/': return 2 elif v in '()': return 0 raise Exception(v)", "return self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self): return self._list.__repr__() def isempty(self): return", "self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self): return 
self._list.__repr__() def isempty(self): return self._list.__len__()", "解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$': return 0 elif", "return 2 elif v in '()': return 0 raise Exception(v) def compare(v1, v2):", "in '()': continue else: t2 = curr.pop() t1 = curr.pop() result = eval(str(t1)", "None def pop(self): return self._list.pop(0) if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex", "2 elif v in '()': return 0 raise Exception(v) def compare(v1, v2): return", "in (ex + \"$\"): if c in '1234567890': exp.push(c) elif c == '(':", "elif v in '+-': return 1 elif v in '*/': return 2 elif", "__init__(self, iterator=None): self._list = [] if iterator: for i in iterator: self.push(i) def", "str(t) + str(t2)) print(str(t1) + str(t) + str(t2), '=', result) curr.push(result) print(curr) print(eval(ex))", "curr = Stack() while not exp.isempty(): t = exp.pop() if t in \"123456789\":", "True: top = ops.pop() if top == '(': break if top == '$':", "curr.pop() result = eval(str(t1) + str(t) + str(t2)) print(str(t1) + str(t) + str(t2),", "= exp.pop() if t in \"123456789\": curr.push(t) elif t in '()': continue else:", "= level(top) ops.push(c) print(ops) print(exp) print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr", "if iterator: for i in iterator: self.push(i) def __len__(self): return self._list.__len__() def __str__(self):", "ops.get() l1 = level(c) l2 = level(top) while l1 <= l2: if c", "return self._list.pop(0) if not self.isempty() else None class Queue(Container): def push(self, u): self._list", "not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v", "self._list.__str__() def __repr__(self): return self._list.__repr__() def isempty(self): return self._list.__len__() == 0 def push(self,", "\"123456789\": curr.push(t) elif t in '()': continue else: t2 = curr.pop() t1 =", "in '*/': return 2 elif v in '()': return 0 raise Exception(v) def", "class Container: def __init__(self, 
iterator=None): self._list = [] if iterator: for i in", "'$': return 0 elif v in '+-': return 1 elif v in '*/':", "ops exp.push(ops.pop()) top = ops.get() l2 = level(top) ops.push(c) print(ops) print(exp) print() exp,", "l1 <= l2: if c == '$' and top == '$': return exp,", "# 基本容器定义 class Container: def __init__(self, iterator=None): self._list = [] if iterator: for", "in \"123456789\": curr.push(t) elif t in '()': continue else: t2 = curr.pop() t1", "v in '+-': return 1 elif v in '*/': return 2 elif v", "'+-': return 1 elif v in '*/': return 2 elif v in '()':", "not exp.isempty(): t = exp.pop() if t in \"123456789\": curr.push(t) elif t in", "self._list[-1] if not self.isempty() else None def pop(self): return self._list.pop(0) if not self.isempty()", "else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$':", "Stack(Container): def push(self, u): self._list = [u] + self._list def get(self): return self._list[0]", "Stack() while not exp.isempty(): t = exp.pop() if t in \"123456789\": curr.push(t) elif", "1 elif v in '*/': return 2 elif v in '()': return 0", "get(self): return self._list[0] if not self.isempty() else None def pop(self): return self._list.pop(0) if", "def level(v): if v in '$': return 0 elif v in '+-': return", "def push(self, u): pass def get(self): pass def pop(self): pass class Stack(Container): def", "in '+-': return 1 elif v in '*/': return 2 elif v in", "def get(self): return self._list[0] if not self.isempty() else None def pop(self): return self._list.pop(0)", "v in '$': return 0 elif v in '+-': return 1 elif v", "== ')': while True: top = ops.pop() if top == '(': break if", "= eval(str(t1) + str(t) + str(t2)) print(str(t1) + str(t) + str(t2), '=', result)", "self._list = self._list + [u] def get(self): return self._list[-1] if not self.isempty() else", "level(v2) def parse(ex): ops = Stack(\"$\") exp = Queue() for c in (ex", "内容:将四则运算的中缀表达式转换成后缀表达式并计算 要求:每个数字必须都是一位数,我没有考虑多位数和小数的情况 \"\"\" # 基本容器定义 class 
Container: def __init__(self, iterator=None): self._list = []", "top == '$': raise exp.push(top) elif c in '+-*$': top = ops.get() l1", "if t in \"123456789\": curr.push(t) elif t in '()': continue else: t2 =", "return level(v1) - level(v2) def parse(ex): ops = Stack(\"$\") exp = Queue() for", "level(top) while l1 <= l2: if c == '$' and top == '$':", "'$' and top == '$': return exp, ops exp.push(ops.pop()) top = ops.get() l2", "for c in (ex + \"$\"): if c in '1234567890': exp.push(c) elif c", "break if top == '$': raise exp.push(top) elif c in '+-*$': top =", "for i in iterator: self.push(i) def __len__(self): return self._list.__len__() def __str__(self): return self._list.__str__()", "if v in '$': return 0 elif v in '+-': return 1 elif", "'$': raise exp.push(top) elif c in '+-*$': top = ops.get() l1 = level(c)", "def __len__(self): return self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self): return self._list.__repr__() def", "__len__(self): return self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self): return self._list.__repr__() def isempty(self):", "self.isempty() else None def pop(self): return self._list.pop(0) if not self.isempty() else None class", "iterator=None): self._list = [] if iterator: for i in iterator: self.push(i) def __len__(self):", "if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v): if", "push(self, u): self._list = [u] + self._list def get(self): return self._list[0] if not", "self._list + [u] def get(self): return self._list[-1] if not self.isempty() else None def", "c in '1234567890': exp.push(c) elif c == '(': ops.push('(') elif c == ')':", "if top == '(': break if top == '$': raise exp.push(top) elif c", "<= l2: if c == '$' and top == '$': return exp, ops", "[u] + self._list def get(self): return self._list[0] if not self.isempty() else None def", "parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty(): t = 
exp.pop() if", "__repr__(self): return self._list.__repr__() def isempty(self): return self._list.__len__() == 0 def push(self, u): pass", "else None def pop(self): return self._list.pop(0) if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式", "level(c) l2 = level(top) while l1 <= l2: if c == '$' and", "pop(self): return self._list.pop(0) if not self.isempty() else None class Queue(Container): def push(self, u):", "self._list.pop(0) if not self.isempty() else None # 解析中缀表达式并转换成后缀表达式 ex = \"5+9-8*7+6*(5-4+(3*2))\" def level(v):", "'(': ops.push('(') elif c == ')': while True: top = ops.pop() if top", "while l1 <= l2: if c == '$' and top == '$': return", "v2): return level(v1) - level(v2) def parse(ex): ops = Stack(\"$\") exp = Queue()", "(ex + \"$\"): if c in '1234567890': exp.push(c) elif c == '(': ops.push('(')", "return self._list.__len__() == 0 def push(self, u): pass def get(self): pass def pop(self):", "in '+-*$': top = ops.get() l1 = level(c) l2 = level(top) while l1", "= [u] + self._list def get(self): return self._list[0] if not self.isempty() else None", "\"\"\" # 基本容器定义 class Container: def __init__(self, iterator=None): self._list = [] if iterator:", "Queue(Container): def push(self, u): self._list = self._list + [u] def get(self): return self._list[-1]", "raise Exception(v) def compare(v1, v2): return level(v1) - level(v2) def parse(ex): ops =", "exp = Queue() for c in (ex + \"$\"): if c in '1234567890':", "+ self._list def get(self): return self._list[0] if not self.isempty() else None def pop(self):", "== 0 def push(self, u): pass def get(self): pass def pop(self): pass class", "self._list.__repr__() def isempty(self): return self._list.__len__() == 0 def push(self, u): pass def get(self):", "elif v in '*/': return 2 elif v in '()': return 0 raise", "exp.push(c) elif c == '(': ops.push('(') elif c == ')': while True: top", "exp.push(top) elif c in '+-*$': top = ops.get() l1 = level(c) l2 =", "__str__(self): return self._list.__str__() def 
__repr__(self): return self._list.__repr__() def isempty(self): return self._list.__len__() == 0", "l2 = level(top) ops.push(c) print(ops) print(exp) print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式", "t1 = curr.pop() result = eval(str(t1) + str(t) + str(t2)) print(str(t1) + str(t)", "elif v in '()': return 0 raise Exception(v) def compare(v1, v2): return level(v1)", "elif c == ')': while True: top = ops.pop() if top == '(':", "return 1 elif v in '*/': return 2 elif v in '()': return", "curr.push(t) elif t in '()': continue else: t2 = curr.pop() t1 = curr.pop()", "'1234567890': exp.push(c) elif c == '(': ops.push('(') elif c == ')': while True:", "\"5+9-8*7+6*(5-4+(3*2))\" def level(v): if v in '$': return 0 elif v in '+-':", "== '$' and top == '$': return exp, ops exp.push(ops.pop()) top = ops.get()", "ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty(): t =", "parse(ex): ops = Stack(\"$\") exp = Queue() for c in (ex + \"$\"):", "def get(self): return self._list[-1] if not self.isempty() else None def pop(self): return self._list.pop(0)", "def parse(ex): ops = Stack(\"$\") exp = Queue() for c in (ex +", "ops.pop() if top == '(': break if top == '$': raise exp.push(top) elif", "level(top) ops.push(c) print(ops) print(exp) print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr =", "= [] if iterator: for i in iterator: self.push(i) def __len__(self): return self._list.__len__()", "return 0 raise Exception(v) def compare(v1, v2): return level(v1) - level(v2) def parse(ex):", "l2 = level(top) while l1 <= l2: if c == '$' and top", "curr.pop() t1 = curr.pop() result = eval(str(t1) + str(t) + str(t2)) print(str(t1) +", "result = eval(str(t1) + str(t) + str(t2)) print(str(t1) + str(t) + str(t2), '=',", "self.push(i) def __len__(self): return self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self): return self._list.__repr__()", "push(self, u): pass def get(self): pass def pop(self): pass class 
Stack(Container): def push(self,", "in iterator: self.push(i) def __len__(self): return self._list.__len__() def __str__(self): return self._list.__str__() def __repr__(self):", "u): pass def get(self): pass def pop(self): pass class Stack(Container): def push(self, u):", "def pop(self): return self._list.pop(0) if not self.isempty() else None class Queue(Container): def push(self,", "== '(': break if top == '$': raise exp.push(top) elif c in '+-*$':", "u): self._list = [u] + self._list def get(self): return self._list[0] if not self.isempty()", "if c in '1234567890': exp.push(c) elif c == '(': ops.push('(') elif c ==", "None def pop(self): return self._list.pop(0) if not self.isempty() else None class Queue(Container): def", "= level(top) while l1 <= l2: if c == '$' and top ==", "= Stack() while not exp.isempty(): t = exp.pop() if t in \"123456789\": curr.push(t)", "return self._list[0] if not self.isempty() else None def pop(self): return self._list.pop(0) if not", "# 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack() while not exp.isempty(): t = exp.pop() if t", "else: t2 = curr.pop() t1 = curr.pop() result = eval(str(t1) + str(t) +", "return self._list[-1] if not self.isempty() else None def pop(self): return self._list.pop(0) if not", "= curr.pop() t1 = curr.pop() result = eval(str(t1) + str(t) + str(t2)) print(str(t1)", "== '$': raise exp.push(top) elif c in '+-*$': top = ops.get() l1 =", "[] if iterator: for i in iterator: self.push(i) def __len__(self): return self._list.__len__() def", "ops.push(c) print(ops) print(exp) print() exp, ops = parse(ex) # 计算后缀表达式,并使用eval验算原中缀表达式 curr = Stack()", "ops.get() l2 = level(top) ops.push(c) print(ops) print(exp) print() exp, ops = parse(ex) #" ]
[ "test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self):", "'') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin", "plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self):", "self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def", "= Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK)", "def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def", "plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40'])", "'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 
'OK') plugin.add_result(WARNING,", "plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold,", "self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5))", "import unittest from nagplug import Plugin, Threshold, ArgumentParserError from nagplug import OK, WARNING,", "test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self):", "self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, [])", "self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING)", "test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self):", "test_simple_default(self): plugin = 
Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK')", "def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def", "def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK,", "plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin()", "'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=',", "plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING,", "plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING')", "nagplug import Plugin, Threshold, ArgumentParserError from nagplug import OK, WARNING, CRITICAL, UNKNOWN class", "= Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING']))", "self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c',", "Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def 
test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11))", "plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin =", "def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def", "test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c',", "Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK', 'hey!', 'STUFF'])) if __name__ == '__main__': unittest.main()", "class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def", "test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self):", "test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): 
self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self):", "Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin =", "test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK')", "self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20))", "TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin()", "plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError,", "'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin =", "import Plugin, Threshold, ArgumentParserError from nagplug import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase):", "plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING')", "def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def 
test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def", "def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def", "test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin()", "def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self):", "self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin", "args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args", "ArgumentParserError from nagplug import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin", "self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11))", "'0:40']) 
self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args,", "def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def", "plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class", "self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self):", "unittest from nagplug import Plugin, Threshold, ArgumentParserError from nagplug import OK, WARNING, CRITICAL,", "plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin()", "def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin =", "plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING)", "Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL,", "CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args", 
"UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args =", "Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\"))", "test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING,", "'10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w',", "Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def", "self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin =", "plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase):", "= Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK', 'hey!', 'STUFF'])) if __name__ == '__main__':", "= Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING')", "def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) 
def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def", "plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class", "self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK')", "plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin", "CRITICAL) def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def", "def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def", "'-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold',", "self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): 
self.assertTrue(Threshold(\":10\").check(5))", "test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin()", "'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(),", "self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10))", "WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true')", "CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin", "= plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin =", "def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def", "= Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(),", "self.assertEqual(OK, plugin.check_threshold(15, 
args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, [])", "= Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c',", "plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK',", "def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin =", "def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args =", "plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def", "', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!')", "test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin()", "test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self):", "Plugin, Threshold, ArgumentParserError from nagplug import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def", "test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def 
test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin", "Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING,", "Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def", "'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase):", "test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self):", "Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError,", "Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin()", "= Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold)", "test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def 
test_threshold_range_upperbound(self):", "nagplug import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin()", "plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL,", "self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING')", "def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ',", "OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin", "type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self):", "def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK,", "(\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10))", "test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def", "def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') 
plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '),", "def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20',", "Threshold, ArgumentParserError from nagplug import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self):", "plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold',", "type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold,", "self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10))", "def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self):", "= Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL')", "'10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): 
plugin = Plugin() plugin.add_arg('test')", "= Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK')", "test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self):", "class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK', 'hey!',", "def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin =", "import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e',", "Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self):", "self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def", "self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() 
self.assertEqual(plugin.get_code(), UNKNOWN)", "'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL,", "Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING,", "test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin()", "test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(),", "test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self):", "plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING')", "from nagplug import Plugin, Threshold, ArgumentParserError from nagplug import OK, WARNING, CRITICAL, UNKNOWN", "OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test',", "plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold))", "[]) def 
test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase):", "'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK,", "TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK', 'hey!', 'STUFF']))", "= Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin =", "args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold')", "plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin()", "TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self):", "Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL']))", "'0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold)", "Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) 
plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40'])", "'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin()", "def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class", "', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING')", "self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1))", "TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin()", "'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING,", "plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def", "= Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def 
test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold,", "plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK,", "class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin =", "plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(),", "'--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold))", "'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL')", "def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1))", "plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin", "test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin", "Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class", "plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') 
self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK')", "self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20))", "plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL)", "Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15,", "plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self):", "= Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK')", "action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c',", "self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL')", "test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) 
def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self):", "msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF')", "plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def", "self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w',", "CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(),", "plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING')", "test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self):", "'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin =", "plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold,", "test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): 
self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0))", "plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def", "'--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold')", "'--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15,", "Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(),", "def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self):", "self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL')", "'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def", "plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def", "test_threshold_range_inside(self): 
self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self):", "self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10))", "plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK,", "self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20))", "UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin", "def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def", "plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') 
plugin.add_arg('-c', '--critical-threshold') args =", "test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self):", "'--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self):", "class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin =", "plugin = Plugin() self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(),", "'-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError,", "'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def", "def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self): plugin =", "'.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK',", "test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), 
'\\n'.join(['OK', 'hey!', 'STUFF'])) if __name__", "test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self):", "self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21))", "def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def", "self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15))", "def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING')", "def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def", "plugin = Plugin() 
plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN')", "[]) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\"))", "self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10))", "plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w',", "self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self):", "= Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin", "self.assertEqual(plugin.get_code(), UNKNOWN) def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self):", "class TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e'])", "args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, 
args.critical_threshold)) def test_parse_exceptions(self): plugin", "def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin =", "test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w',", "plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK,", "WARNING) def test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin", "'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self):", "= Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK')", "test_threshold_openrange_inside(self): self.assertTrue(Threshold(\":10\").check(5)) def test_threshold_openrange_over(self): self.assertFalse(Threshold(\":10\").check(20)) def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase):", "', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(),", "'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin() 
plugin.add_result(OK, 'OK') plugin.add_result(WARNING,", "plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ',", "self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10))", "test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self):", "def test_threshold_openrange_neg(self): self.assertTrue(Threshold(\"~:10\").check(-1)) def test_threshold_openrange_neg_over(self): self.assertFalse(Threshold(\"~:10\").check(11)) class TestCode(unittest.TestCase): def test_simple_default(self): plugin = Plugin()", "args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self):", "test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]),", "'--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def", "self.assertEqual(plugin.get_message(joiner=', ', 
msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin = Plugin() plugin.add_extdata('OK')", "'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=',", "def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def", "= Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING',", "= Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK,", "def test_theshold_simple_inside(self): self.assertTrue(Threshold(\"10\").check(5)) def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def", "Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL)", "from nagplug import OK, WARNING, CRITICAL, UNKNOWN class TestParsing(unittest.TestCase): def test_parse(self): plugin =", "def test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) 
def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def", "= plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self): plugin =", "Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(),", "'OK') self.assertEqual(plugin.get_message(), 'OK') def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL,", "plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin =", "test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ',", "= plugin.parser.parse_args(['-e']) self.assertTrue(args.test) def test_parse_threshold_string(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold') plugin.add_arg('-c', '--critical-threshold') args", "def test_threshold_range_one(self): self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def", "self.assertTrue(Threshold(\"10:10\").check(10)) def test_threshold_range_lowerbound(self): self.assertTrue(Threshold(\"10:20\").check(10)) def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def 
test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9))", "def test_threshold_range_inside(self): self.assertTrue(Threshold(\"10:20\").check(15)) def test_threshold_range_upperbound(self): self.assertTrue(Threshold(\"10:20\").check(20)) def test_threshold_range_lower(self): self.assertFalse(Threshold(\"10:20\").check(9)) def test_threshold_range_upper(self): self.assertFalse(Threshold(\"10:20\").check(21)) def", "self.assertEqual(plugin.get_code(), OK) def test_simple_warning(self): plugin = Plugin() plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_critical(self):", "'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def", "'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def", "plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold) args = plugin.parse_args(['-w', '10:20',", "args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin() plugin.add_arg('-w', '--warning-threshold', type=Threshold) plugin.add_arg('-c', '--critical-threshold', type=Threshold)", "'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]), ', '.join(['WARNING'])) class TestExtData(unittest.TestCase): def test_simple(self): plugin", "plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): 
plugin", "args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_threshold_native(self): plugin", "type=Threshold) args = plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, args.critical_threshold)) def test_parse_exceptions(self):", "WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING, 'WARNING') plugin.add_result(WARNING,", "'.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL,", "plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold',", "self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError,", "test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def test_theshold_simple_zero(self): self.assertTrue(Threshold(\"10\").check(0)) def test_theshold_simple_upperbound(self): self.assertTrue(Threshold(\"10\").check(10)) def test_theshold_simple_inside(self):", "def test_simple_owc(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL)", "self.assertEqual(plugin.get_message(), '') def test_simple_ok(self): plugin = Plugin() plugin.add_result(OK, 'OK') self.assertEqual(plugin.get_message(), 'OK') def 
test_simple_owc(self):", "plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin()", "plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK', 'hey!', 'STUFF'])) if __name__ ==", "test_threshold_parseerror(self): self.assertRaises(ValueError, Threshold, (\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def", "'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_ow(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(),", "plugin.add_result(WARNING, 'UNKNOWN') self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '')", "def test_simple(self): plugin = Plugin() plugin.add_extdata('OK') plugin.add_extdata('hey!') plugin.add_extdata('STUFF') self.assertEqual(plugin.get_extdata(), '\\n'.join(['OK', 'hey!', 'STUFF'])) if", "def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args, []) class TestThreshold(unittest.TestCase): def", "'), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING,", "TestParsing(unittest.TestCase): def test_parse(self): plugin = Plugin() plugin.add_arg('-e', '--test', action='store_true') args = plugin.parser.parse_args(['-e']) self.assertTrue(args.test)", "plugin.parse_args(['-w', '10:20', '-c', '0:40']) self.assertEqual(OK, plugin.check_threshold(15, args.warning_threshold, 
args.critical_threshold)) def test_parse_threshold_native(self): plugin = Plugin()", "= Plugin() plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin()", "'OK') plugin.add_result(WARNING, 'WARNING') plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self):", "self.assertEqual(plugin.get_code(), CRITICAL) class TestMessage(unittest.TestCase): def test_simple_default(self): plugin = Plugin() self.assertEqual(plugin.get_message(), '') def test_simple_ok(self):", "plugin.add_result(OK, 'OK') plugin.add_result(WARNING, 'WARNING') self.assertEqual(plugin.get_code(), WARNING) def test_simple_cw(self): plugin = Plugin() plugin.add_result(CRITICAL, 'OK')", "test_threshold_invert_bound(self): self.assertFalse(Threshold(\"@10\").check(10)) def test_threshold_invert_range(self): self.assertFalse(Threshold(\"@10:20\").check(10)) def test_threshold_invert_upper(self): self.assertFalse(Threshold(\"@:20\").check(10)) def test_threshold_openrange_simple(self): self.assertTrue(Threshold(\"10:\").check(20)) def test_threshold_openrange_inside(self):", "test_simple_critical(self): plugin = Plugin() plugin.add_result(CRITICAL, 'CRITICAL') self.assertEqual(plugin.get_code(), CRITICAL) def test_simple_owc(self): plugin = Plugin()", "args.critical_threshold)) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin", "'CRITICAL') self.assertEqual(plugin.get_message(joiner=', '), ', '.join(['OK', 'WARNING', 'CRITICAL'])) def test_simple_owc_level(self): plugin = Plugin() plugin.add_result(OK,", "plugin.add_arg('test') self.assertRaises(ArgumentParserError, plugin.parse_args, []) def test_parse_exceptions(self): plugin = Plugin() plugin.add_arg('threshold', 
type=Threshold) self.assertRaises(ArgumentParserError, plugin.parse_args,", "(\"helloworld\")) def test_threshold_valueerror(self): self.assertRaises(ValueError, Threshold, (\"10:2\")) def test_theshold_simple_neg(self): self.assertFalse(Threshold(\"10\").check(-1)) def test_theshold_simple_over(self): self.assertFalse(Threshold(\"10\").check(11)) def" ]
[ "json import urllib.request import ssl from sys import argv unverified = ssl._create_unverified_context() start", "str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if comic_to_get ==", "a reliable transcript is roughly 1608? # Last with any might be 1677.", "transcript is roughly 1608? # Last with any might be 1677. # But", "import urllib.request import ssl from sys import argv unverified = ssl._create_unverified_context() start =", "python_obj['num'] assert comic_to_get == n T = python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1", "be 1677. # But 1677 really contains transcript from 1674. # someone noticed", "reliable transcript is roughly 1608? # Last with any might be 1677. #", "end = e + 1 def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) +", "python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n) + ',' +", "get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) + '/info.0.json' response = urllib.request.urlopen(url, context =", "s < e start = s end = e + 1 def get_comic_by_num(n):", "comic_to_get == n T = python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words =", "this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s = int(argv[1]) e = int(argv[2]) assert", "Last with any might be 1677. 
# But 1677 really contains transcript from", "return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if comic_to_get == 404: next", "python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get == n T = python_obj['transcript']", "comic_to_get in range(start, end): if comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get) n", "sys import argv unverified = ssl._create_unverified_context() start = 614 end = 617 #", "= 'https://xkcd.com/' + str(n) + '/info.0.json' response = urllib.request.urlopen(url, context = unverified) json_data", "comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get ==", "assert comic_to_get == n T = python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words", "in range(start, end): if comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get) n =", "614 end = 617 # last comic with a reliable transcript is roughly", "+ 1 def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) + '/info.0.json' response =", "= len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n) + ',' + str(panels) +", "last comic with a reliable transcript is roughly 1608? # Last with any", "+ '/info.0.json' response = urllib.request.urlopen(url, context = unverified) json_data = str(response.read(), encoding='utf-8') return", "transcript from 1674. # someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s", "any might be 1677. # But 1677 really contains transcript from 1674. #", "# Last with any might be 1677. # But 1677 really contains transcript", "if comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get", "ssl._create_unverified_context() start = 614 end = 617 # last comic with a reliable", "with a reliable transcript is roughly 1608? 
# Last with any might be", "urllib.request.urlopen(url, context = unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get", "= unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start,", "= int(argv[2]) assert s < e start = s end = e +", "end): if comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert", "n = python_obj['num'] assert comic_to_get == n T = python_obj['transcript'] panels = len(T.split('\\n\\n'))", "e start = s end = e + 1 def get_comic_by_num(n): url =", "<reponame>zimolzak/get-xkcd import json import urllib.request import ssl from sys import argv unverified =", "= urllib.request.urlopen(url, context = unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for", "n T = python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n)", "assert s < e start = s end = e + 1 def", "1 words = len(T.split()) print(str(n) + ',' + str(panels) + ',' + str(words))", "from sys import argv unverified = ssl._create_unverified_context() start = 614 end = 617", "e + 1 def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) + '/info.0.json' response", "len(argv) == 3: s = int(argv[1]) e = int(argv[2]) assert s < e", "= e + 1 def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) + '/info.0.json'", "s end = e + 1 def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n)", "url = 'https://xkcd.com/' + str(n) + '/info.0.json' response = urllib.request.urlopen(url, context = unverified)", "unverified = ssl._create_unverified_context() start = 614 end = 617 # last comic with", "= int(argv[1]) e = int(argv[2]) assert s < e start = s end", "1 def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) + '/info.0.json' response = 
urllib.request.urlopen(url,", "urllib.request import ssl from sys import argv unverified = ssl._create_unverified_context() start = 614", "= python_obj['num'] assert comic_to_get == n T = python_obj['transcript'] panels = len(T.split('\\n\\n')) -", "= get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get == n T = python_obj['transcript'] panels", "< e start = s end = e + 1 def get_comic_by_num(n): url", "T = python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n) +", "617 # last comic with a reliable transcript is roughly 1608? # Last", "start = s end = e + 1 def get_comic_by_num(n): url = 'https://xkcd.com/'", "1608? # Last with any might be 1677. # But 1677 really contains", "http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s = int(argv[1]) e = int(argv[2]) assert s", "get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get == n T = python_obj['transcript'] panels =", "= 614 end = 617 # last comic with a reliable transcript is", "'/info.0.json' response = urllib.request.urlopen(url, context = unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data)", "range(start, end): if comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num']", "== 3: s = int(argv[1]) e = int(argv[2]) assert s < e start", "s = int(argv[1]) e = int(argv[2]) assert s < e start = s", "really contains transcript from 1674. # someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) ==", "panels = len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n) + ',' + str(panels)", "end = 617 # last comic with a reliable transcript is roughly 1608?", "1674. # someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s = int(argv[1])", "e = int(argv[2]) assert s < e start = s end = e", "from 1674. 
# someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s =", "= python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n) + ','", "= 617 # last comic with a reliable transcript is roughly 1608? #", "response = urllib.request.urlopen(url, context = unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\")", "import ssl from sys import argv unverified = ssl._create_unverified_context() start = 614 end", "== 404: next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get == n", "if len(argv) == 3: s = int(argv[1]) e = int(argv[2]) assert s <", "import json import urllib.request import ssl from sys import argv unverified = ssl._create_unverified_context()", "'https://xkcd.com/' + str(n) + '/info.0.json' response = urllib.request.urlopen(url, context = unverified) json_data =", "json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if", "next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get == n T =", "argv unverified = ssl._create_unverified_context() start = 614 end = 617 # last comic", "start = 614 end = 617 # last comic with a reliable transcript", "= str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if comic_to_get", "1677 really contains transcript from 1674. # someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv)", "str(n) + '/info.0.json' response = urllib.request.urlopen(url, context = unverified) json_data = str(response.read(), encoding='utf-8')", "with any might be 1677. 
# But 1677 really contains transcript from 1674.", "== n T = python_obj['transcript'] panels = len(T.split('\\n\\n')) - 1 words = len(T.split())", "encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if comic_to_get == 404:", "1677. # But 1677 really contains transcript from 1674. # someone noticed this:", "comic with a reliable transcript is roughly 1608? # Last with any might", "noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s = int(argv[1]) e = int(argv[2])", "someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s = int(argv[1]) e =", "roughly 1608? # Last with any might be 1677. # But 1677 really", "might be 1677. # But 1677 really contains transcript from 1674. # someone", "def get_comic_by_num(n): url = 'https://xkcd.com/' + str(n) + '/info.0.json' response = urllib.request.urlopen(url, context", "= ssl._create_unverified_context() start = 614 end = 617 # last comic with a", "3: s = int(argv[1]) e = int(argv[2]) assert s < e start =", "int(argv[2]) assert s < e start = s end = e + 1", "+ str(n) + '/info.0.json' response = urllib.request.urlopen(url, context = unverified) json_data = str(response.read(),", "is roughly 1608? # Last with any might be 1677. # But 1677", "json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if comic_to_get == 404: next python_obj", "import argv unverified = ssl._create_unverified_context() start = 614 end = 617 # last", "# last comic with a reliable transcript is roughly 1608? # Last with", "contains transcript from 1674. # someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3:", "- 1 words = len(T.split()) print(str(n) + ',' + str(panels) + ',' +", "# But 1677 really contains transcript from 1674. 
# someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433", "int(argv[1]) e = int(argv[2]) assert s < e start = s end =", "for comic_to_get in range(start, end): if comic_to_get == 404: next python_obj = get_comic_by_num(comic_to_get)", "ssl from sys import argv unverified = ssl._create_unverified_context() start = 614 end =", "But 1677 really contains transcript from 1674. # someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if", "# someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433 if len(argv) == 3: s = int(argv[1]) e", "print(\"comic_num,panels,words\") for comic_to_get in range(start, end): if comic_to_get == 404: next python_obj =", "404: next python_obj = get_comic_by_num(comic_to_get) n = python_obj['num'] assert comic_to_get == n T", "unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in range(start, end):", "len(T.split('\\n\\n')) - 1 words = len(T.split()) print(str(n) + ',' + str(panels) + ','", "context = unverified) json_data = str(response.read(), encoding='utf-8') return json.loads(json_data) print(\"comic_num,panels,words\") for comic_to_get in", "= s end = e + 1 def get_comic_by_num(n): url = 'https://xkcd.com/' +" ]
[ "\"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction = transaction def", "import TransactionPropertyNotFound class Transaction: KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\",", "transaction): self._transaction = transaction def __str__(self): return str(self._transaction) def __getattr__(self, item): if item", "return str(self._transaction) def __getattr__(self, item): if item in self._transaction: return self._transaction[item] raise TransactionPropertyNotFound(\"%s", "item): if item in self._transaction: return self._transaction[item] raise TransactionPropertyNotFound(\"%s property has not found\")", "\"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self, transaction):", "{ \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\",", "__getattr__(self, item): if item in self._transaction: return self._transaction[item] raise TransactionPropertyNotFound(\"%s property has not", "KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\",", "from paypal_transactions_wrapper.exceptions import TransactionPropertyNotFound class Transaction: KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\",", "def __init__(self, transaction): self._transaction = transaction def __str__(self): return str(self._transaction) def __getattr__(self, item):", "def __str__(self): return str(self._transaction) def __getattr__(self, item): if item in self._transaction: return self._transaction[item]", "\"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction = transaction def __str__(self):", "def __getattr__(self, item): if item in self._transaction: return 
self._transaction[item] raise TransactionPropertyNotFound(\"%s property has", "transaction def __str__(self): return str(self._transaction) def __getattr__(self, item): if item in self._transaction: return", "str(self._transaction) def __getattr__(self, item): if item in self._transaction: return self._transaction[item] raise TransactionPropertyNotFound(\"%s property", "\"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\",", "self._transaction = transaction def __str__(self): return str(self._transaction) def __getattr__(self, item): if item in", "\"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", }", "__str__(self): return str(self._transaction) def __getattr__(self, item): if item in self._transaction: return self._transaction[item] raise", "\"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction =", "\"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction = transaction def __str__(self): return str(self._transaction)", "\"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\":", "Transaction: KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\":", "\"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\",", "paypal_transactions_wrapper.exceptions import TransactionPropertyNotFound class Transaction: KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\":", "\"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", 
\"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\",", "\"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction = transaction def __str__(self): return", "class Transaction: KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\",", "\"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\":", "\"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self,", "} def __init__(self, transaction): self._transaction = transaction def __str__(self): return str(self._transaction) def __getattr__(self,", "\"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction = transaction", "\"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", \"CURRENCYCODE\": \"currency\", } def", "__init__(self, transaction): self._transaction = transaction def __str__(self): return str(self._transaction) def __getattr__(self, item): if", "\"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\": \"id\", \"STATUS\":", "= transaction def __str__(self): return str(self._transaction) def __getattr__(self, item): if item in self._transaction:", "TransactionPropertyNotFound class Transaction: KEY_MAP = { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\":", "= { \"TIMESTAMP\": \"date\", \"TIMEZONE\": \"timezone\", \"TYPE\": \"type\", \"EMAIL\": \"costumer_email\", \"NAME\": \"costumer_name\", \"TRANSACTIONID\":", "\"TRANSACTIONID\": \"id\", \"STATUS\": \"status\", \"AMT\": \"amount\", 
\"CURRENCYCODE\": \"currency\", } def __init__(self, transaction): self._transaction", "\"currency\", } def __init__(self, transaction): self._transaction = transaction def __str__(self): return str(self._transaction) def" ]
[ "values and you will have a single complete permutation. When the entire call", "usage, this is not a DP solution as it isn't relying on the", "the current elem in as the ith elem in a partial permutation then", "+ 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string = 'abc' permutations =", "permutation. When the entire call is complete, it will have made all permutations.", "results - O(n!) time and space total time: 45mins :) ''' _permutations =", "string Solution: Iterate over the elements and lock the current elem in as", "I'm storing the perms outside the function to limit memory usage, this is", "have a single complete permutation. When the entire call is complete, it will", "remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters): new_partial_permutation", "more remaining values and you will have a single complete permutation. When the", "it will have made all permutations. While I'm storing the perms outside the", "perms outside the function to limit memory usage, this is not a DP", "remaining values sans the ith element. 
Eventually there will be no more remaining", "45mins :) ''' _permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters)", "all permutations of a given string Solution: Iterate over the elements and lock", "storing the perms outside the function to limit memory usage, this is not", "limit memory usage, this is not a DP solution as it isn't relying", "a given string Solution: Iterate over the elements and lock the current elem", "_permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation)", "in as the ith elem in a partial permutation then recurse with the", "solution as it isn't relying on the previous results - O(n!) time and", "remaining values and you will have a single complete permutation. When the entire", "the ith elem in a partial permutation then recurse with the remaining values", "made all permutations. While I'm storing the perms outside the function to limit", "all permutations. While I'm storing the perms outside the function to limit memory", "call is complete, it will have made all permutations. While I'm storing the", "return for index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values =", "_permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values", "in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values = remaining_characters[:index] + remaining_characters[index +", "time: 45mins :) ''' _permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if", "- O(n!) 
time and space total time: 45mins :) ''' _permutations = []", "new_partial_permutation = partial_permutation + value new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation,", "lock the current elem in as the ith elem in a partial permutation", "_fill_permutations('', string) return set(_permutations) string = 'abc' permutations = all_permutations(string) print permutations print", "<filename>cracking-the-coding-interview/ch9-recursion-and-dynamic-programming/9.5-permutations.py ''' Problem: Return all permutations of a given string Solution: Iterate over", "len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation", "the function to limit memory usage, this is not a DP solution as", ":) ''' _permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) ==", "partial permutation then recurse with the remaining values sans the ith element. 
Eventually", "and lock the current elem in as the ith elem in a partial", "= partial_permutation + value new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values)", "partial_permutation + value new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('',", "enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:]", "Solution: Iterate over the elements and lock the current elem in as the", "time and space total time: 45mins :) ''' _permutations = [] def all_permutations(string):", "the perms outside the function to limit memory usage, this is not a", "the previous results - O(n!) time and space total time: 45mins :) '''", "single complete permutation. When the entire call is complete, it will have made", "is not a DP solution as it isn't relying on the previous results", "will have a single complete permutation. When the entire call is complete, it", "with the remaining values sans the ith element. Eventually there will be no", "on the previous results - O(n!) time and space total time: 45mins :)", "remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string =", "not a DP solution as it isn't relying on the previous results -", "the entire call is complete, it will have made all permutations. While I'm", "elem in a partial permutation then recurse with the remaining values sans the", "sans the ith element. 
Eventually there will be no more remaining values and", "total time: 45mins :) ''' _permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters):", "if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters): new_partial_permutation =", "a DP solution as it isn't relying on the previous results - O(n!)", "in a partial permutation then recurse with the remaining values sans the ith", "While I'm storing the perms outside the function to limit memory usage, this", "a single complete permutation. When the entire call is complete, it will have", "+ remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string = 'abc'", "Iterate over the elements and lock the current elem in as the ith", "When the entire call is complete, it will have made all permutations. While", "string) return set(_permutations) string = 'abc' permutations = all_permutations(string) print permutations print len(permutations)", "the elements and lock the current elem in as the ith elem in", "this is not a DP solution as it isn't relying on the previous", "outside the function to limit memory usage, this is not a DP solution", "index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values = remaining_characters[:index] +", "relying on the previous results - O(n!) time and space total time: 45mins", "def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index, value in", "and space total time: 45mins :) ''' _permutations = [] def all_permutations(string): def", "complete permutation. When the entire call is complete, it will have made all", "DP solution as it isn't relying on the previous results - O(n!) 
time", "[] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for", "== 0: _permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation +", "value in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values = remaining_characters[:index] + remaining_characters[index", "the ith element. Eventually there will be no more remaining values and you", "is complete, it will have made all permutations. While I'm storing the perms", "_fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters):", "space total time: 45mins :) ''' _permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation,", "there will be no more remaining values and you will have a single", "for index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value new_remaining_values = remaining_characters[:index]", "as the ith elem in a partial permutation then recurse with the remaining", "''' _permutations = [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0:", "all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index, value", "elements and lock the current elem in as the ith elem in a", "0: _permutations.append(partial_permutation) return for index, value in enumerate(remaining_characters): new_partial_permutation = partial_permutation + value", "elem in as the ith elem in a partial permutation then recurse with", "complete, it 
will have made all permutations. While I'm storing the perms outside", "_fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string = 'abc' permutations = all_permutations(string) print", "be no more remaining values and you will have a single complete permutation.", "have made all permutations. While I'm storing the perms outside the function to", "new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations)", "1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string = 'abc' permutations = all_permutations(string)", "Return all permutations of a given string Solution: Iterate over the elements and", "ith element. Eventually there will be no more remaining values and you will", "Eventually there will be no more remaining values and you will have a", "isn't relying on the previous results - O(n!) time and space total time:", "element. Eventually there will be no more remaining values and you will have", "then recurse with the remaining values sans the ith element. Eventually there will", "''' Problem: Return all permutations of a given string Solution: Iterate over the", "as it isn't relying on the previous results - O(n!) time and space", "and you will have a single complete permutation. When the entire call is", "to limit memory usage, this is not a DP solution as it isn't", "= [] def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return", "given string Solution: Iterate over the elements and lock the current elem in", "of a given string Solution: Iterate over the elements and lock the current", "recurse with the remaining values sans the ith element. 
Eventually there will be", "no more remaining values and you will have a single complete permutation. When", "will be no more remaining values and you will have a single complete", "permutations. While I'm storing the perms outside the function to limit memory usage,", "it isn't relying on the previous results - O(n!) time and space total", "+ value new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string)", "over the elements and lock the current elem in as the ith elem", "Problem: Return all permutations of a given string Solution: Iterate over the elements", "the remaining values sans the ith element. Eventually there will be no more", "new_remaining_values) _fill_permutations('', string) return set(_permutations) string = 'abc' permutations = all_permutations(string) print permutations", "previous results - O(n!) time and space total time: 45mins :) ''' _permutations", "permutation then recurse with the remaining values sans the ith element. Eventually there", "= remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string", "will have made all permutations. While I'm storing the perms outside the function", "you will have a single complete permutation. When the entire call is complete,", "function to limit memory usage, this is not a DP solution as it", "value new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return", "current elem in as the ith elem in a partial permutation then recurse", "a partial permutation then recurse with the remaining values sans the ith element.", "values sans the ith element. 
Eventually there will be no more remaining values", "permutations of a given string Solution: Iterate over the elements and lock the", "ith elem in a partial permutation then recurse with the remaining values sans", "memory usage, this is not a DP solution as it isn't relying on", "remaining_characters[index + 1:] _fill_permutations(new_partial_permutation, new_remaining_values) _fill_permutations('', string) return set(_permutations) string = 'abc' permutations", "entire call is complete, it will have made all permutations. While I'm storing", "O(n!) time and space total time: 45mins :) ''' _permutations = [] def", "def all_permutations(string): def _fill_permutations(partial_permutation, remaining_characters): if len(remaining_characters) == 0: _permutations.append(partial_permutation) return for index," ]
[ "знаков в начале # и конце словаи записываем это в файл # ?", "= open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое множество куда бужем складывать значения", "очищаем от спец. знаков # в начале слов и в конце. фунция чтения", "or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',')", "if i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or", "file text_file_9.4.txt. # Where will all the unique words be written new_file =", "new to programming? \" \\ \"'If not then we presume you will be", "for information \" \\ \"about why and how to get started with Python.", "в конце. фунция чтения файла и определения в нем # уникальных слов def", "затем показывает список всех # уникальных слов в файле. (Подсказка: храните слова в", "or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else:", "be looking for information \" \\ \"about why and how to get started", "i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or i.endswith('?')", "continue if i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\ or i.startswith('(')", "information \" \\ \"about why and how to get started with Python. Fortunately", "в качестве элементов # множества.) def main(): my_str = \"Welcome! Are are are", "\"about why and how to get started with Python. Fortunately an \" \\", "then we presume you will be looking for information \" \\ \"about why", "от спец. знаков # в начале слов и в конце. фунция чтения файла", "слов def creat_file(str_literal): # creat file text_file_9.4.txt. # Where will all the unique", "be written new_file = open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое множество куда", ". 
{ } ( ) \" ' for i in content: if i.endswith('.')", "{ } ( ) \" ' for i in content: if i.endswith('.') or", "'\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\", "programmer in any programming language \" \\ \"(whatever it may be) can pick", "текстовый литерал в файл и очищаем от спец. знаков # в начале слов", "print() print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total unique words in the text", "# # 4. Уникальные слова. Напишите программу, которая открывает заданный # текстовый файл", "\"'If not then we presume you will be looking for information \" \\", "файле. (Подсказка: храните слова в качестве элементов # множества.) def main(): my_str =", "# creat file text_file_9.4.txt. # Where will all the unique words be written", "Where will all the unique words be written new_file = open('text_file_9.4.txt', 'w') content", "i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] +", "открывает заданный # текстовый файл (text_file_9.4.txt) и затем показывает список всех # уникальных", "начале # и конце словаи записываем это в файл # ? ! ,", "will be looking for information \" \\ \"about why and how to get", "\\ \"about why and how to get started with Python. Fortunately an \"", "in the text =\", len(creat_file(my_str))) # записываем текстовый литерал в файл и очищаем", "} ( ) \" ' for i in content: if i.endswith('.') or i.endswith(',')", "Python very quickly. \" \\ \"It's also easy for beginners to use and", "= set([]) # Циклом очищаем циклом все слова от знаков в начале #", "i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.')", "что бы ущнать количество content_clear = set([]) # Циклом очищаем циклом все слова", "словаи записываем это в файл # ? ! , . 
{ } (", "the text =\", len(creat_file(my_str))) # записываем текстовый литерал в файл и очищаем от", "программу, которая открывает заданный # текстовый файл (text_file_9.4.txt) и затем показывает список всех", "unique words # # 4. Уникальные слова. Напишите программу, которая открывает заданный #", "список всех # уникальных слов в файле. (Подсказка: храните слова в качестве элементов", "content = str_literal.split() # пустое множество куда бужем складывать значения и потом возмем", "файла и определения в нем # уникальных слов def creat_file(str_literal): # creat file", "\") print(creat_file(my_str)) print(\"Total unique words in the text =\", len(creat_file(my_str))) # записываем текстовый", "! , . { } ( ) \" ' for i in content:", "и в конце. фунция чтения файла и определения в нем # уникальных слов", "i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\")", "words be written new_file = open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое множество", "\\ \"'If not then we presume you will be looking for information \"", "looking for information \" \\ \"about why and how to get started with", "очищаем циклом все слова от знаков в начале # и конце словаи записываем", "все слова от знаков в начале # и конце словаи записываем это в", "i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or i.startswith('?')", "\\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i + '\\n') content_clear.update([i]) new_file.close()", "are below: \") print(creat_file(my_str)) print(\"Total unique words in the text =\", len(creat_file(my_str))) #", "written new_file = open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое множество куда бужем", "Уникальные слова. 
Напишите программу, которая открывает заданный # текстовый файл (text_file_9.4.txt) и затем", "# Циклом очищаем циклом все слова от знаков в начале # и конце", "элементов # множества.) def main(): my_str = \"Welcome! Are are are you completely", "заданный # текстовый файл (text_file_9.4.txt) и затем показывает список всех # уникальных слов", "we presume you will be looking for information \" \\ \"about why and", "quickly. \" \\ \"It's also easy for beginners to use and learn, so", "or i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\", "\" \\ \"It's also easy for beginners to use and learn, so jump", "слова от знаков в начале # и конце словаи записываем это в файл", "or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if", "конце. фунция чтения файла и определения в нем # уникальных слов def creat_file(str_literal):", "def main(): my_str = \"Welcome! Are are are you completely new to programming?", ") \" ' for i in content: if i.endswith('.') or i.endswith(',') \\ or", "файл # ? ! , . { } ( ) \" ' for", "can pick up Python very quickly. \" \\ \"It's also easy for beginners", "jump in!\" print() print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total unique words in", "i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\ or i.startswith('!')", "множества.) def main(): my_str = \"Welcome! Are are are you completely new to", "литерал в файл и очищаем от спец. знаков # в начале слов и", "why and how to get started with Python. Fortunately an \" \\ \"experienced", "(text_file_9.4.txt) и затем показывает список всех # уникальных слов в файле. (Подсказка: храните", "=\", len(creat_file(my_str))) # записываем текстовый литерал в файл и очищаем от спец. знаков", "get started with Python. 
Fortunately an \" \\ \"experienced programmer in any programming", "бы ущнать количество content_clear = set([]) # Циклом очищаем циклом все слова от", "def creat_file(str_literal): # creat file text_file_9.4.txt. # Where will all the unique words", "will all the unique words be written new_file = open('text_file_9.4.txt', 'w') content =", "use and learn, so jump in!\" print() print(\"Unique words are below: \") print(creat_file(my_str))", "words in the text =\", len(creat_file(my_str))) # записываем текстовый литерал в файл и", "определения в нем # уникальных слов def creat_file(str_literal): # creat file text_file_9.4.txt. #", "for i in content: if i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or i.endswith(':')", "content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\ or", "i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i + '\\n')", "в файл # ? ! , . { } ( ) \" '", "in content: if i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\ or", "которая открывает заданный # текстовый файл (text_file_9.4.txt) и затем показывает список всех #", "programming language \" \\ \"(whatever it may be) can pick up Python very", "# уникальных слов def creat_file(str_literal): # creat file text_file_9.4.txt. # Where will all", "и затем показывает список всех # уникальных слов в файле. (Подсказка: храните слова", "+ '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or i.startswith(':')", "len(creat_file(my_str))) # записываем текстовый литерал в файл и очищаем от спец. знаков #", "конце словаи записываем это в файл # ? ! , . { }", "in!\" print() print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total unique words in the", "в файл и очищаем от спец. 
знаков # в начале слов и в", "you completely new to programming? \" \\ \"'If not then we presume you", "# Where will all the unique words be written new_file = open('text_file_9.4.txt', 'w')", "\\ or i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\ or i.endswith('}')", "i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue", "are are you completely new to programming? \" \\ \"'If not then we", "в файле. (Подсказка: храните слова в качестве элементов # множества.) def main(): my_str", "\"Welcome! Are are are you completely new to programming? \" \\ \"'If not", "= str_literal.split() # пустое множество куда бужем складывать значения и потом возмем #", "or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i + '\\n') content_clear.update([i])", "это в файл # ? ! , . { } ( ) \"", "for beginners to use and learn, so jump in!\" print() print(\"Unique words are", "started with Python. Fortunately an \" \\ \"experienced programmer in any programming language", "всех # уникальных слов в файле. (Подсказка: храните слова в качестве элементов #", "<filename>chapter_09/04_unique_words.py # unique words # # 4. Уникальные слова. Напишите программу, которая открывает", "or i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or", "\\ \"experienced programmer in any programming language \" \\ \"(whatever it may be)", "unique words in the text =\", len(creat_file(my_str))) # записываем текстовый литерал в файл", "not then we presume you will be looking for information \" \\ \"about", "и потом возмем # len что бы ущнать количество content_clear = set([]) #", "спец. знаков # в начале слов и в конце. фунция чтения файла и", "уникальных слов def creat_file(str_literal): # creat file text_file_9.4.txt. 
# Where will all the", "бужем складывать значения и потом возмем # len что бы ущнать количество content_clear", "складывать значения и потом возмем # len что бы ущнать количество content_clear =", "new_file = open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое множество куда бужем складывать", "# текстовый файл (text_file_9.4.txt) и затем показывает список всех # уникальных слов в", "показывает список всех # уникальных слов в файле. (Подсказка: храните слова в качестве", "? ! , . { } ( ) \" ' for i in", "i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\", "\\ or i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\ or i.startswith('{')", "or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i +", "easy for beginners to use and learn, so jump in!\" print() print(\"Unique words", "all the unique words be written new_file = open('text_file_9.4.txt', 'w') content = str_literal.split()", "words # # 4. Уникальные слова. Напишите программу, которая открывает заданный # текстовый", "language \" \\ \"(whatever it may be) can pick up Python very quickly.", "print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total unique words in the text =\",", "or i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\", "\\ \"It's also easy for beginners to use and learn, so jump in!\"", "\" \\ \"(whatever it may be) can pick up Python very quickly. \"", "слов в файле. (Подсказка: храните слова в качестве элементов # множества.) 
def main():", "also easy for beginners to use and learn, so jump in!\" print() print(\"Unique", "множество куда бужем складывать значения и потом возмем # len что бы ущнать", "below: \") print(creat_file(my_str)) print(\"Total unique words in the text =\", len(creat_file(my_str))) # записываем", "# len что бы ущнать количество content_clear = set([]) # Циклом очищаем циклом", "чтения файла и определения в нем # уникальных слов def creat_file(str_literal): # creat", "\\ or i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n')", "to programming? \" \\ \"'If not then we presume you will be looking", "print(creat_file(my_str)) print(\"Total unique words in the text =\", len(creat_file(my_str))) # записываем текстовый литерал", "знаков # в начале слов и в конце. фунция чтения файла и определения", "храните слова в качестве элементов # множества.) def main(): my_str = \"Welcome! Are", "записываем это в файл # ? ! , . { } ( )", "# уникальных слов в файле. (Подсказка: храните слова в качестве элементов # множества.)", "# ? ! , . { } ( ) \" ' for i", "print(\"Total unique words in the text =\", len(creat_file(my_str))) # записываем текстовый литерал в", "начале слов и в конце. фунция чтения файла и определения в нем #", "(Подсказка: храните слова в качестве элементов # множества.) def main(): my_str = \"Welcome!", "learn, so jump in!\" print() print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total unique", "и очищаем от спец. знаков # в начале слов и в конце. фунция", "i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i", "\" \\ \"about why and how to get started with Python. 
Fortunately an", "or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\ or", "\\ or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue", "you will be looking for information \" \\ \"about why and how to", "my_str = \"Welcome! Are are are you completely new to programming? \" \\", "new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or", "i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\ or", "it may be) can pick up Python very quickly. \" \\ \"It's also", "to get started with Python. Fortunately an \" \\ \"experienced programmer in any", "to use and learn, so jump in!\" print() print(\"Unique words are below: \")", "\\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]]) continue if i.startswith('.') or", "up Python very quickly. \" \\ \"It's also easy for beginners to use", "so jump in!\" print() print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total unique words", "качестве элементов # множества.) def main(): my_str = \"Welcome! Are are are you", "and learn, so jump in!\" print() print(\"Unique words are below: \") print(creat_file(my_str)) print(\"Total", "words are below: \") print(creat_file(my_str)) print(\"Total unique words in the text =\", len(creat_file(my_str)))", "'w') content = str_literal.split() # пустое множество куда бужем складывать значения и потом", "Are are are you completely new to programming? \" \\ \"'If not then", "very quickly. 
\" \\ \"It's also easy for beginners to use and learn,", "or i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:]", "or i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or", "+ '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i + '\\n') content_clear.update([i]) new_file.close() return content_clear main()", "и конце словаи записываем это в файл # ? ! , . {", "количество content_clear = set([]) # Циклом очищаем циклом все слова от знаков в", "content: if i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\ or i.endswith(')')", "слова в качестве элементов # множества.) def main(): my_str = \"Welcome! Are are", "\\ or i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'):", "programming? \" \\ \"'If not then we presume you will be looking for", "файл и очищаем от спец. знаков # в начале слов и в конце.", "# множества.) def main(): my_str = \"Welcome! Are are are you completely new", "and how to get started with Python. Fortunately an \" \\ \"experienced programmer", "куда бужем складывать значения и потом возмем # len что бы ущнать количество", "' for i in content: if i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or", "фунция чтения файла и определения в нем # уникальных слов def creat_file(str_literal): #", "i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'):", "or i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1]", "\"experienced programmer in any programming language \" \\ \"(whatever it may be) can", "# в начале слов и в конце. 
фунция чтения файла и определения в", "i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or \\", "or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n')", "пустое множество куда бужем складывать значения и потом возмем # len что бы", "и определения в нем # уникальных слов def creat_file(str_literal): # creat file text_file_9.4.txt.", "возмем # len что бы ущнать количество content_clear = set([]) # Циклом очищаем", "Fortunately an \" \\ \"experienced programmer in any programming language \" \\ \"(whatever", "Напишите программу, которая открывает заданный # текстовый файл (text_file_9.4.txt) и затем показывает список", "записываем текстовый литерал в файл и очищаем от спец. знаков # в начале", "значения и потом возмем # len что бы ущнать количество content_clear = set([])", "в начале # и конце словаи записываем это в файл # ? !", "слов и в конце. фунция чтения файла и определения в нем # уникальных", "нем # уникальных слов def creat_file(str_literal): # creat file text_file_9.4.txt. # Where will", "# записываем текстовый литерал в файл и очищаем от спец. знаков # в", "str_literal.split() # пустое множество куда бужем складывать значения и потом возмем # len", "в нем # уникальных слов def creat_file(str_literal): # creat file text_file_9.4.txt. # Where", "Циклом очищаем циклом все слова от знаков в начале # и конце словаи", "or i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or", "потом возмем # len что бы ущнать количество content_clear = set([]) # Циклом", "( ) \" ' for i in content: if i.endswith('.') or i.endswith(',') \\", "an \" \\ \"experienced programmer in any programming language \" \\ \"(whatever it", "# и конце словаи записываем это в файл # ? ! 
, .", "i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i + '\\n') content_clear.update([i]) new_file.close() return", "with Python. Fortunately an \" \\ \"experienced programmer in any programming language \"", "\\ \"(whatever it may be) can pick up Python very quickly. \" \\", "len что бы ущнать количество content_clear = set([]) # Циклом очищаем циклом все", "in any programming language \" \\ \"(whatever it may be) can pick up", "content_clear = set([]) # Циклом очищаем циклом все слова от знаков в начале", ", . { } ( ) \" ' for i in content: if", "or i.endswith(')') or i.endswith('?') \\ or i.endswith('}') or i.endswith('\"'): new_file.write(i[:-1] + '\\n') content_clear.update([i[:-1]])", "new_file.write(i[1:] + '\\n') content_clear.update([i[1:]]) continue else: new_file.write(i + '\\n') content_clear.update([i]) new_file.close() return content_clear", "completely new to programming? \" \\ \"'If not then we presume you will", "слова. Напишите программу, которая открывает заданный # текстовый файл (text_file_9.4.txt) и затем показывает", "# 4. Уникальные слова. Напишите программу, которая открывает заданный # текстовый файл (text_file_9.4.txt)", "open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое множество куда бужем складывать значения и", "set([]) # Циклом очищаем циклом все слова от знаков в начале # и", "\" \\ \"'If not then we presume you will be looking for information", "pick up Python very quickly. \" \\ \"It's also easy for beginners to", "the unique words be written new_file = open('text_file_9.4.txt', 'w') content = str_literal.split() #", "how to get started with Python. Fortunately an \" \\ \"experienced programmer in", "текстовый файл (text_file_9.4.txt) и затем показывает список всех # уникальных слов в файле.", "beginners to use and learn, so jump in!\" print() print(\"Unique words are below:", "в начале слов и в конце. 
фунция чтения файла и определения в нем", "циклом все слова от знаков в начале # и конце словаи записываем это", "от знаков в начале # и конце словаи записываем это в файл #", "\" \\ \"experienced programmer in any programming language \" \\ \"(whatever it may", "файл (text_file_9.4.txt) и затем показывает список всех # уникальных слов в файле. (Подсказка:", "presume you will be looking for information \" \\ \"about why and how", "i.startswith('(') or i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] +", "\" ' for i in content: if i.endswith('.') or i.endswith(',') \\ or i.endswith('!')", "unique words be written new_file = open('text_file_9.4.txt', 'w') content = str_literal.split() # пустое", "i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\ or i.endswith(')') or i.endswith('?') \\ or", "\"It's also easy for beginners to use and learn, so jump in!\" print()", "4. Уникальные слова. Напишите программу, которая открывает заданный # текстовый файл (text_file_9.4.txt) и", "if i.startswith('.') or i.startswith(',') \\ or i.startswith('!') or i.startswith(':') \\ or i.startswith('(') or", "i.startswith('?') \\ or i.startswith('{') or i.startswith(\"'\") or \\ i.startswith('\"'): new_file.write(i[1:] + '\\n') content_clear.update([i[1:]])", "are you completely new to programming? \" \\ \"'If not then we presume", "i in content: if i.endswith('.') or i.endswith(',') \\ or i.endswith('!') or i.endswith(':') \\", "Python. Fortunately an \" \\ \"experienced programmer in any programming language \" \\", "# unique words # # 4. Уникальные слова. Напишите программу, которая открывает заданный", "be) can pick up Python very quickly. \" \\ \"It's also easy for", "уникальных слов в файле. (Подсказка: храните слова в качестве элементов # множества.) def", "creat_file(str_literal): # creat file text_file_9.4.txt. # Where will all the unique words be", "text_file_9.4.txt. 
# Where will all the unique words be written new_file = open('text_file_9.4.txt',", "\"(whatever it may be) can pick up Python very quickly. \" \\ \"It's", "any programming language \" \\ \"(whatever it may be) can pick up Python", "main(): my_str = \"Welcome! Are are are you completely new to programming? \"", "may be) can pick up Python very quickly. \" \\ \"It's also easy", "creat file text_file_9.4.txt. # Where will all the unique words be written new_file", "# пустое множество куда бужем складывать значения и потом возмем # len что", "ущнать количество content_clear = set([]) # Циклом очищаем циклом все слова от знаков", "text =\", len(creat_file(my_str))) # записываем текстовый литерал в файл и очищаем от спец.", "= \"Welcome! Are are are you completely new to programming? \" \\ \"'If" ]
[ "= API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns list of", "available_prints = {} for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj):", "with the name with height, width, and layer height''' return self.test_prints[name](height, width, layer_height,", "self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns list of test prints by name''' return", "height = 30 width = 30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) '''", "for a print with the name with height, width, and layer height''' return", "from peachyprinter.domain.layer_generator import LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used for", "name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj, 'name'): available_prints[obj.name] = obj return", "by name''' return self.test_prints.keys() def get_test_print(self, name, height, width, layer_height, speed=100): '''Gets the", "with height, width, and layer height''' return self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self):", "dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj, 'name'):", "= getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj, 'name'): available_prints[obj.name] =", "for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator):", "API = TestPrintAPI() selected_print = API.test_print_names()[0] height = 30 width = 30 layer_height", "name''' return self.test_prints.keys() def 
get_test_print(self, name, height, width, layer_height, speed=100): '''Gets the layer", "self.test_prints.keys() def get_test_print(self, name, height, width, layer_height, speed=100): '''Gets the layer generator for", "and layer height''' return self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self): available_prints = {}", "generator for a print with the name with height, width, and layer height'''", "= API.test_print_names()[0] height = 30 width = 30 layer_height = 0.01 test_print =", "= 30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints =", "getting test prints Typical usage: API = TestPrintAPI() selected_print = API.test_print_names()[0] height =", "name, height, width, layer_height, speed=100): '''Gets the layer generator for a print with", "test prints Typical usage: API = TestPrintAPI() selected_print = API.test_print_names()[0] height = 30", "getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj, 'name'): available_prints[obj.name] = obj", "def _get_test_prints(self): available_prints = {} for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name)", "class TestPrintAPI(object): '''Api used for getting test prints Typical usage: API = TestPrintAPI()", "if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj, 'name'): available_prints[obj.name] = obj return available_prints", "test_print_names(self): '''Returns list of test prints by name''' return self.test_prints.keys() def get_test_print(self, name,", "test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns list", "layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = 
self._get_test_prints() def", "0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns", "width, layer_height, speed) def _get_test_prints(self): available_prints = {} for name in dir(print_test_layer_generators): obj", "width = 30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints", "= 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = self._get_test_prints() def test_print_names(self):", "TestPrintAPI(object): '''Api used for getting test prints Typical usage: API = TestPrintAPI() selected_print", "for getting test prints Typical usage: API = TestPrintAPI() selected_print = API.test_print_names()[0] height", "obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj, 'name'): available_prints[obj.name]", "def get_test_print(self, name, height, width, layer_height, speed=100): '''Gets the layer generator for a", "layer height''' return self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self): available_prints = {} for", "the name with height, width, and layer height''' return self.test_prints[name](height, width, layer_height, speed)", "'''Gets the layer generator for a print with the name with height, width,", "width, layer_height, speed=100): '''Gets the layer generator for a print with the name", "__init__(self): self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns list of test prints by name'''", "list of test prints by name''' return self.test_prints.keys() def get_test_print(self, name, height, width,", "test prints by name''' return self.test_prints.keys() def get_test_print(self, name, height, width, layer_height, speed=100):", "height, width, and layer 
height''' return self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self): available_prints", "a print with the name with height, width, and layer height''' return self.test_prints[name](height,", "of test prints by name''' return self.test_prints.keys() def get_test_print(self, name, height, width, layer_height,", "import print_test_layer_generators class TestPrintAPI(object): '''Api used for getting test prints Typical usage: API", "= TestPrintAPI() selected_print = API.test_print_names()[0] height = 30 width = 30 layer_height =", "width, and layer height''' return self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self): available_prints =", "speed=100): '''Gets the layer generator for a print with the name with height,", "speed) def _get_test_prints(self): available_prints = {} for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators,", "return self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self): available_prints = {} for name in", "30 width = 30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self):", "used for getting test prints Typical usage: API = TestPrintAPI() selected_print = API.test_print_names()[0]", "prints by name''' return self.test_prints.keys() def get_test_print(self, name, height, width, layer_height, speed=100): '''Gets", "'''Returns list of test prints by name''' return self.test_prints.keys() def get_test_print(self, name, height,", "print with the name with height, width, and layer height''' return self.test_prints[name](height, width,", "layer_height, speed=100): '''Gets the layer generator for a print with the name with", "in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if hasattr(obj,", "''' def __init__(self): self.test_prints = self._get_test_prints() def 
test_print_names(self): '''Returns list of test prints", "usage: API = TestPrintAPI() selected_print = API.test_print_names()[0] height = 30 width = 30", "{} for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj,", "get_test_print(self, name, height, width, layer_height, speed=100): '''Gets the layer generator for a print", "LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used for getting test prints", "import LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used for getting test", "import inspect from peachyprinter.domain.layer_generator import LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api", "self._get_test_prints() def test_print_names(self): '''Returns list of test prints by name''' return self.test_prints.keys() def", "from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used for getting test prints Typical", "def __init__(self): self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns list of test prints by", "the layer generator for a print with the name with height, width, and", "peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used for getting test prints Typical usage:", "name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if issubclass(obj, LayerGenerator): if", "return self.test_prints.keys() def get_test_print(self, name, height, width, layer_height, speed=100): '''Gets the layer generator", "'''Api used for getting test prints Typical usage: API = TestPrintAPI() selected_print =", "height''' return self.test_prints[name](height, width, layer_height, speed) def 
_get_test_prints(self): available_prints = {} for name", "peachyprinter.domain.layer_generator import LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used for getting", "<filename>src/peachyprinter/api/test_print_api.py<gh_stars>10-100 import inspect from peachyprinter.domain.layer_generator import LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object):", "API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = self._get_test_prints() def test_print_names(self): '''Returns list of test", "Typical usage: API = TestPrintAPI() selected_print = API.test_print_names()[0] height = 30 width =", "= 30 width = 30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def", "layer_height, speed) def _get_test_prints(self): available_prints = {} for name in dir(print_test_layer_generators): obj =", "layer generator for a print with the name with height, width, and layer", "= self._get_test_prints() def test_print_names(self): '''Returns list of test prints by name''' return self.test_prints.keys()", "_get_test_prints(self): available_prints = {} for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if", "height, width, layer_height, speed=100): '''Gets the layer generator for a print with the", "name with height, width, and layer height''' return self.test_prints[name](height, width, layer_height, speed) def", "selected_print = API.test_print_names()[0] height = 30 width = 30 layer_height = 0.01 test_print", "API.test_print_names()[0] height = 30 width = 30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height)", "def test_print_names(self): '''Returns list of test prints by name''' return self.test_prints.keys() def get_test_print(self,", "prints Typical usage: API = TestPrintAPI() 
selected_print = API.test_print_names()[0] height = 30 width", "print_test_layer_generators class TestPrintAPI(object): '''Api used for getting test prints Typical usage: API =", "inspect from peachyprinter.domain.layer_generator import LayerGenerator from peachyprinter.infrastructure import print_test_layer_generators class TestPrintAPI(object): '''Api used", "self.test_prints[name](height, width, layer_height, speed) def _get_test_prints(self): available_prints = {} for name in dir(print_test_layer_generators):", "= {} for name in dir(print_test_layer_generators): obj = getattr(print_test_layer_generators, name) if inspect.isclass(obj): if", "30 layer_height = 0.01 test_print = API.get_test_print(selected_print,height,width,layer_height) ''' def __init__(self): self.test_prints = self._get_test_prints()", "TestPrintAPI() selected_print = API.test_print_names()[0] height = 30 width = 30 layer_height = 0.01" ]
[ "chord = 1 theta = 1 + chord / rho_step while theta <=", "CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "documentation are # those of the authors and should not be interpreted as", "#!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (C) 2013", ") u_model = np.eye(4).astype( np.float32 ) collection = DashLines() lw = 20 x0,y0", "This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import ctypes from OpenGL", "without # modification, are permitted provided that the following conditions are met: #", "theta ) y = rho * np.sin( theta ) P.append( (x,y) ) theta", "All rights reserved. # # Redistribution and use in source and binary forms,", "0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if __name__ == '__main__': import sys", "if sys.platform == 'darwin': import ctypes from OpenGL import platform try: glutInitDisplayString =", "= DashLines() lw = 20 x0,y0 = 500.0, 500.0 coils = 12 rho_max", "retain the above copyright notice, # this list of conditions and the following", "either expressed or implied, of Nicolas P. Rougier. # ----------------------------------------------------------------------------- import numpy as", "provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED BY NICOLAS P.", "'\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps,", "THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import ctypes from OpenGL import", "provided that the following conditions are met: # # 1. Redistributions of source", "u_projection, 'u_model' : u_model, 'u_view' : u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height):", "# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO #", "coils = 12 rho_max = 450. theta_max = coils * 2 * np.pi", "OpenGL import platform try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( )", "are met: # # 1. Redistributions of source code must retain the above", "12 rho_max = 450. theta_max = coils * 2 * np.pi rho_step =", "conditions and the following disclaimer in the # documentation and/or other materials provided", "official # policies, either expressed or implied, of Nicolas P. Rougier. # -----------------------------------------------------------------------------", "and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS PROVIDED", "contained in the software and documentation are # those of the authors and", "np.sin( theta ) P.append( (x,y) ) theta += chord / rho chord +=", "of the authors and should not be interpreted as representing official # policies,", "AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32 ) u_view", "interpreted as representing official # policies, either expressed or implied, of Nicolas P.", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE", "ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "permitted provided that the following conditions are met: # # 1. Redistributions of", "# Copyright (C) 2013 <NAME>. All rights reserved. # # Redistribution and use", "OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR", "display # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import ctypes", "theta_max: rho = rho_step * theta x = rho * np.cos( theta )", "DISCLAIMED. IN NO # EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE", "def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model, 'u_view'", "dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=() ) text = ctypes.c_char_p(\"rgba stencil", "hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating", "notice, this list of conditions and the following disclaimer in the # documentation", "1 + chord / rho_step while theta <= theta_max: rho = rho_step *", "NICOLAS P. 
ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING,", "FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING,", "the software and documentation are # those of the authors and should not", "rho = rho_step * theta x = rho * np.cos( theta ) y", "collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model, 'u_view' : u_view}) glut.glutSwapBuffers() # ------------------------------------- def", "with or without # modification, are permitted provided that the following conditions are", "20 x0,y0 = 500.0, 500.0 coils = 12 rho_max = 450. theta_max =", "or implied, of Nicolas P. Rougier. # ----------------------------------------------------------------------------- import numpy as np import", "gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32 ) u_view = np.eye(4).astype( np.float32 )", "500.0, 500.0 coils = 12 rho_max = 450. theta_max = coils * 2", "IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL NICOLAS", "and documentation are # those of the authors and should not be interpreted", "EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #", "the distribution. # # THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS", "import numpy as np import OpenGL.GL as gl from transforms import ortho #", "1 theta = 1 + chord / rho_step while theta <= theta_max: rho", "following conditions are met: # # 1. Redistributions of source code must retain", "NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL,", "# # 1. Redistributions of source code must retain the above copyright notice,", "disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright", "BY NICOLAS P. 
ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES,", ": u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0, width, height) u_projection[...]", "# HiDPI support for retina display # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if", "# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", ") gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32 )", "of Nicolas P. Rougier. # ----------------------------------------------------------------------------- import numpy as np import OpenGL.GL as", "# ------------------------------------- if __name__ == '__main__': import sys import OpenGL.GLUT as glut from", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER", "# This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import ctypes from", "modification, are permitted provided that the following conditions are met: # # 1.", "# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #", "USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY", "theta x = rho * np.cos( theta ) y = rho * np.sin(", "# documentation and/or other materials provided with the distribution. # # THIS SOFTWARE", "PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED", "rights reserved. # # Redistribution and use in source and binary forms, with", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA,", "coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (C) 2013 <NAME>. 
All rights reserved.", ") text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE |", "----------------------------------------------------------------------------- # Copyright (C) 2013 <NAME>. All rights reserved. # # Redistribution and", "SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR", "Redistributions in binary form must reproduce the above copyright # notice, this list", "None', argNames=() ) text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass", "y = rho * np.sin( theta ) P.append( (x,y) ) theta += chord", "following disclaimer. # # 2. Redistributions in binary form must reproduce the above", "* 2 * np.pi rho_step = rho_max / theta_max P=[] chord = 1", "+ chord / rho_step while theta <= theta_max: rho = rho_step * theta", "A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL NICOLAS P. ROUGIER", "Copyright (C) 2013 <NAME>. All rights reserved. # # Redistribution and use in", "# # THIS SOFTWARE IS PROVIDED BY NICOLAS P. 
ROUGIER ''AS IS'' AND", "glut.glutPostRedisplay() # ------------------------------------- if __name__ == '__main__': import sys import OpenGL.GLUT as glut", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE)", "= np.eye(4).astype( np.float32 ) collection = DashLines() lw = 20 x0,y0 = 500.0,", "of source code must retain the above copyright notice, # this list of", "argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=() ) text = ctypes.c_char_p(\"rgba stencil double samples=8", "argNames=() ) text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE", "linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P, translate=(x0,y0),", "Redistributions of source code must retain the above copyright notice, # this list", "Rougier. # ----------------------------------------------------------------------------- import numpy as np import OpenGL.GL as gl from transforms", "or without # modification, are permitted provided that the following conditions are met:", "# ----------------------------------------------------------------------------- import numpy as np import OpenGL.GL as gl from transforms import", "OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE", "'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=() ) text = ctypes.c_char_p(\"rgba", "notice, # this list of conditions and the following disclaimer. # # 2.", "BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR", "# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED", "glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support for retina display #", "met: # # 1. 
# -------------------------------------
def on_display():
    """Render callback: clear the frame and draw the dashed spiral collection."""
    gl.glClearColor(1, 1, 1, 1)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    # The collection shader consumes the three standard MVP matrices that the
    # main block allocates as module-level 4x4 float32 identity arrays.
    collection.draw(uniforms={'u_projection': u_projection,
                              'u_model': u_model,
                              'u_view': u_view})
    glut.glutSwapBuffers()

# -------------------------------------
def on_reshape(width, height):
    """Resize callback: keep the viewport and the orthographic projection in sync.

    Writes through ``u_projection[...]`` so the array object shared with the
    shader uniforms is updated in place rather than rebound.
    """
    gl.glViewport(0, 0, width, height)
    u_projection[...] = ortho(0, width, 0, height, -1, 1)
    # collection.scale = min(width, height)

# -------------------------------------
def on_keyboard(key, x, y):
    """Keyboard callback: ESC exits the program.

    Fix: GLUT hands ``key`` to this callback as ``bytes`` under Python 3 and
    as ``str`` under Python 2; the original compared against the str ``'\\033'``
    only, so ESC was silently ignored on Python 3. Accept both encodings.
    """
    if key in ('\033', b'\033'):
        sys.exit()

# -------------------------------------
def on_timer(fps):
    """Timer callback: advance dash phase and rotation, then re-arm itself.

    Fix: ``1000/fps`` is a float under Python 3, while glutTimerFunc expects
    integer milliseconds; floor division matches Python 2's original result.
    """
    collection.dash_phase -= 0.05
    collection.rotate -= 0.25 * np.pi / 180.0
    glut.glutTimerFunc(1000 // fps, on_timer, fps)
    glut.glutPostRedisplay()
# -------------------------------------
if __name__ == '__main__':
    import sys
    import OpenGL.GLUT as glut
    from dash_lines_2D import DashLines

    glut.glutInit(sys.argv)

    # HiDPI support for retina display
    # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/
    if sys.platform == 'darwin':
        import ctypes
        from OpenGL import platform
        try:
            glutInitDisplayString = platform.createBaseFunction(
                'glutInitDisplayString', dll=platform.GLUT, resultType=None,
                argTypes=[ctypes.c_char_p],
                doc='glutInitDisplayString( ) -> None', argNames=())
            text = ctypes.c_char_p("rgba stencil double samples=8 hidpi")
            glutInitDisplayString(text)
        # Best-effort probe: the custom glut build may be absent.  Narrowed
        # from a bare ``except:`` so SystemExit/KeyboardInterrupt still escape.
        except Exception:
            pass

    glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH)
    glut.glutInitWindowSize(1000, 1000)
    glut.glutCreateWindow("Dashed rotating spiral")
    glut.glutDisplayFunc(on_display)
    glut.glutReshapeFunc(on_reshape)
    glut.glutKeyboardFunc(on_keyboard)
    fps = 60
    # Fix: glutTimerFunc expects integer milliseconds; 1000/fps is a float on
    # Python 3.  Floor division reproduces the Python 2 integer result.
    glut.glutTimerFunc(1000 // fps, on_timer, fps)

    # Some init
    gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    gl.glDisable(gl.GL_DEPTH_TEST)
    gl.glEnable(gl.GL_BLEND)
    gl.glClearColor(1.0, 1.0, 1.0, 1.0)

    # MVP matrices shared with the callbacks above (float32 for the shaders).
    u_projection = np.eye(4).astype(np.float32)
    u_view = np.eye(4).astype(np.float32)
    u_model = np.eye(4).astype(np.float32)

    collection = DashLines()
    lw = 20
    x0, y0 = 500.0, 500.0          # spiral center in window coordinates
    coils = 12                     # number of turns
    rho_max = 450.                 # outer radius in pixels
    theta_max = coils * 2 * np.pi
    rho_step = rho_max / theta_max # radius gained per radian (Archimedean)

    # Sample the spiral at (approximately) constant chord length; the chord
    # grows slightly each step so sampling thins out toward the rim.
    # NOTE(review): the chord increment is assumed to sit inside the loop —
    # confirm against the upstream dash-lines example.
    P = []
    chord = 1
    theta = 1 + chord / rho_step
    while theta <= theta_max:
        rho = rho_step * theta
        x = rho * np.cos(theta)
        y = rho * np.sin(theta)
        P.append((x, y))
        theta += chord / rho
        chord += .05

    # Three overdrawn passes: black outline, near-white fill, dashed grey core.
    collection.append(P, translate=(x0, y0), color=(0, 0, 0, 1),
                      linewidth=lw + 2, dash_pattern='solid')
    collection.append(P, translate=(x0, y0), color=(1, 1, 1, .95),
                      linewidth=lw + 1, dash_pattern='solid')
    collection.append(P, translate=(x0, y0), color=(.65, .65, .65, 1),
                      linewidth=lw, dash_pattern='dashed')
    glut.glutMainLoop()
# ----------------------------------------------------------------------------- import numpy as np", "OpenGL.GL as gl from transforms import ortho # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT", "glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps = 60 glut.glutTimerFunc(1000/fps, on_timer,", "on_timer, fps) # Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable(", "# ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' :", "theta_max P=[] chord = 1 theta = 1 + chord / rho_step while", "x0,y0 = 500.0, 500.0 coils = 12 rho_max = 450. theta_max = coils", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS;", "= 450. theta_max = coils * 2 * np.pi rho_step = rho_max /", "# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #", "-*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (C) 2013 <NAME>. All rights", "the above copyright # notice, this list of conditions and the following disclaimer", "on_reshape(width, height): gl.glViewport(0, 0, width, height) u_projection[...] 
= ortho(0,width,0,height,-1,1) # collection.scale = min(width,", "try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=()", "y): if key == '\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05", "numpy as np import OpenGL.GL as gl from transforms import ortho # -------------------------------------", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE,", "= platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=() ) text", "glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps =", "this list of conditions and the following disclaimer. # # 2. Redistributions in", "gl.glViewport(0, 0, width, height) u_projection[...] = ortho(0,width,0,height,-1,1) # collection.scale = min(width, height) #", "P=[] chord = 1 theta = 1 + chord / rho_step while theta", "HiDPI support for retina display # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform", "DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT", "documentation and/or other materials provided with the distribution. 
# # THIS SOFTWARE IS", ") P.append( (x,y) ) theta += chord / rho chord += .05 collection.append(P,", "and the following disclaimer in the # documentation and/or other materials provided with", "not be interpreted as representing official # policies, either expressed or implied, of", "sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer,", "CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", ": u_model, 'u_view' : u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0,", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY,", "forms, with or without # modification, are permitted provided that the following conditions", "glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps = 60 glut.glutTimerFunc(1000/fps, on_timer, fps) # Some init gl.glBlendFunc(", "np.float32 ) u_model = np.eye(4).astype( np.float32 ) collection = DashLines() lw = 20", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR", "# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (C) 2013 <NAME>. 
All", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "in source and binary forms, with or without # modification, are permitted provided", "| glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps", "dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(.65,.65,.65,1),", "OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "# # Redistribution and use in source and binary forms, with or without", "'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(.65,.65,.65,1), linewidth=lw, dash_pattern", "as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support for retina display", "= 1 theta = 1 + chord / rho_step while theta <= theta_max:", "coils * 2 * np.pi rho_step = rho_max / theta_max P=[] chord =", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE,", "in the # documentation and/or other materials provided with the distribution. # #", "{'u_projection': u_projection, 'u_model' : u_model, 'u_view' : u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width,", "except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display)", "IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "= 20 x0,y0 = 500.0, 500.0 coils = 12 rho_max = 450. 
theta_max", "DashLines glut.glutInit(sys.argv) # HiDPI support for retina display # This requires glut from", "/ theta_max P=[] chord = 1 theta = 1 + chord / rho_step", "the authors and should not be interpreted as representing official # policies, either", "authors and should not be interpreted as representing official # policies, either expressed", "stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000,", "def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() #", ") u_view = np.eye(4).astype( np.float32 ) u_model = np.eye(4).astype( np.float32 ) collection =", "P.append( (x,y) ) theta += chord / rho chord += .05 collection.append(P, translate=(x0,y0),", "# # The views and conclusions contained in the software and documentation are", "= rho * np.sin( theta ) P.append( (x,y) ) theta += chord /", "collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if __name__ == '__main__':", "+= .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1,", "NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A", "# policies, either expressed or implied, of Nicolas P. Rougier. # ----------------------------------------------------------------------------- import", ") gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32 ) u_view = np.eye(4).astype( np.float32 ) u_model", "= coils * 2 * np.pi rho_step = rho_max / theta_max P=[] chord", "DashLines() lw = 20 x0,y0 = 500.0, 500.0 coils = 12 rho_max =", "# 1. 
Redistributions of source code must retain the above copyright notice, #", "# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "if key == '\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate", "fps) glut.glutPostRedisplay() # ------------------------------------- if __name__ == '__main__': import sys import OpenGL.GLUT as", "those of the authors and should not be interpreted as representing official #", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO,", "import platform try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) ->", "theta <= theta_max: rho = rho_step * theta x = rho * np.cos(", "----------------------------------------------------------------------------- import numpy as np import OpenGL.GL as gl from transforms import ortho", "SUCH DAMAGE. # # The views and conclusions contained in the software and", "def on_keyboard(key, x, y): if key == '\\033': sys.exit() # ------------------------------------- def on_timer(fps):", "list of conditions and the following disclaimer. # # 2. Redistributions in binary", "gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model, 'u_view' : u_view}) glut.glutSwapBuffers() # -------------------------------------", "from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import ctypes from OpenGL import platform try:", "use in source and binary forms, with or without # modification, are permitted", "in binary form must reproduce the above copyright # notice, this list of", "''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "# EVENT SHALL NICOLAS P. 
ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "= np.eye(4).astype( np.float32 ) u_view = np.eye(4).astype( np.float32 ) u_model = np.eye(4).astype( np.float32", "gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection =", "= 60 glut.glutTimerFunc(1000/fps, on_timer, fps) # Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable(", "samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed", "= 1 + chord / rho_step while theta <= theta_max: rho = rho_step", "BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "P. ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT", "THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in", "# this list of conditions and the following disclaimer. # # 2. Redistributions", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS", "1. 
Redistributions of source code must retain the above copyright notice, # this", "* np.cos( theta ) y = rho * np.sin( theta ) P.append( (x,y)", "| gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model, 'u_view' : u_view}) glut.glutSwapBuffers() #", "glut.glutTimerFunc(1000/fps, on_timer, fps) # Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST )", "rho_max / theta_max P=[] chord = 1 theta = 1 + chord /", "pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape)", "'u_view' : u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0, width, height)", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection", "ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid')", "collection.dash_phase -= 0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if", "rho_max = 450. 
theta_max = coils * 2 * np.pi rho_step = rho_max", "glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps = 60 glut.glutTimerFunc(1000/fps, on_timer, fps) #", "= min(width, height) # ------------------------------------- def on_keyboard(key, x, y): if key == '\\033':", "0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if __name__ ==", "import ctypes from OpenGL import platform try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None,", "DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "height) # ------------------------------------- def on_keyboard(key, x, y): if key == '\\033': sys.exit() #", "------------------------------------- if __name__ == '__main__': import sys import OpenGL.GLUT as glut from dash_lines_2D", "= rho_step * theta x = rho * np.cos( theta ) y =", "glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if __name__ == '__main__': import sys import", "ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The", "= rho_max / theta_max P=[] chord = 1 theta = 1 + chord", "OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON", "CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "of conditions and the following disclaimer. # # 2. Redistributions in binary form", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY", "2. 
Redistributions in binary form must reproduce the above copyright # notice, this", "OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(.65,.65,.65,1), linewidth=lw, dash_pattern =", "must retain the above copyright notice, # this list of conditions and the", "http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import ctypes from OpenGL import platform try: glutInitDisplayString", "NO # EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY", "color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P,", "USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "# ------------------------------------- def on_keyboard(key, x, y): if key == '\\033': sys.exit() # -------------------------------------", "chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95),", ") gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32 ) u_view = np.eye(4).astype(", "reproduce the above copyright # notice, this list of conditions and the following", "u_model, 'u_view' : u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0, width,", "chord / rho chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid')", ") collection = DashLines() lw = 20 x0,y0 = 500.0, 500.0 coils =", "'darwin': import ctypes from OpenGL import platform try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT,", "IMPLIED WARRANTIES, INCLUDING, BUT NOT 
LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY", "<NAME>. All rights reserved. # # Redistribution and use in source and binary", "the above copyright notice, # this list of conditions and the following disclaimer.", "chord / rho_step while theta <= theta_max: rho = rho_step * theta x", "np.cos( theta ) y = rho * np.sin( theta ) P.append( (x,y) )", "and binary forms, with or without # modification, are permitted provided that the", "def on_reshape(width, height): gl.glViewport(0, 0, width, height) u_projection[...] = ortho(0,width,0,height,-1,1) # collection.scale =", "distribution. # # THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS''", "with the distribution. # # THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER", "np.eye(4).astype( np.float32 ) collection = DashLines() lw = 20 x0,y0 = 500.0, 500.0", "# The views and conclusions contained in the software and documentation are #", "should not be interpreted as representing official # policies, either expressed or implied,", "(x,y) ) theta += chord / rho chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1),", "------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model,", "following disclaimer in the # documentation and/or other materials provided with the distribution.", "The views and conclusions contained in the software and documentation are # those", "theta_max = coils * 2 * np.pi rho_step = rho_max / theta_max P=[]", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "<filename>demo-spiral.py #!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (C)", "2 * np.pi rho_step = rho_max / theta_max P=[] chord = 1 theta", "in the software and documentation are # those of the authors and should", "* 
np.pi rho_step = rho_max / theta_max P=[] chord = 1 theta =", "from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support for retina display # This", "np.float32 ) collection = DashLines() lw = 20 x0,y0 = 500.0, 500.0 coils", "of conditions and the following disclaimer in the # documentation and/or other materials", "# ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0, width, height) u_projection[...] = ortho(0,width,0,height,-1,1) #", "OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS", "text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB", "* theta x = rho * np.cos( theta ) y = rho *", "import OpenGL.GLUT as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support for", "and the following disclaimer. # # 2. Redistributions in binary form must reproduce", "ROUGIER ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT", "support for retina display # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform ==", "rho_step * theta x = rho * np.cos( theta ) y = rho", "# collection.scale = min(width, height) # ------------------------------------- def on_keyboard(key, x, y): if key", "P. Rougier. # ----------------------------------------------------------------------------- import numpy as np import OpenGL.GL as gl from", "rho chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0),", "LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #", "that the following conditions are met: # # 1. 
Redistributions of source code", "collection.scale = min(width, height) # ------------------------------------- def on_keyboard(key, x, y): if key ==", "ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH)", "fps) # Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND", "gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype(", "key == '\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate -=", "ortho(0,width,0,height,-1,1) # collection.scale = min(width, height) # ------------------------------------- def on_keyboard(key, x, y): if", "u_projection[...] = ortho(0,width,0,height,-1,1) # collection.scale = min(width, height) # ------------------------------------- def on_keyboard(key, x,", "conclusions contained in the software and documentation are # those of the authors", "code must retain the above copyright notice, # this list of conditions and", "representing official # policies, either expressed or implied, of Nicolas P. Rougier. #", "the following conditions are met: # # 1. Redistributions of source code must", "1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps = 60 glut.glutTimerFunc(1000/fps, on_timer, fps)", "np import OpenGL.GL as gl from transforms import ortho # ------------------------------------- def on_display():", "OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "DAMAGE. 
# # The views and conclusions contained in the software and documentation", "u_view}) glut.glutSwapBuffers() # ------------------------------------- def on_reshape(width, height): gl.glViewport(0, 0, width, height) u_projection[...] =", "Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0)", "retina display # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/ if sys.platform == 'darwin': import", ".05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern", "OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF", "np.eye(4).astype( np.float32 ) u_view = np.eye(4).astype( np.float32 ) u_model = np.eye(4).astype( np.float32 )", "60 glut.glutTimerFunc(1000/fps, on_timer, fps) # Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST", "and conclusions contained in the software and documentation are # those of the", "glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\")", "import DashLines glut.glutInit(sys.argv) # HiDPI support for retina display # This requires glut", "software and documentation are # those of the authors and should not be", "as representing official # policies, either expressed or implied, of Nicolas P. Rougier.", "SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "== '__main__': import sys import OpenGL.GLUT as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv)", "OF SUCH DAMAGE. 
# # The views and conclusions contained in the software", "OpenGL.GLUT as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support for retina", "IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF", "== '\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate -= 0.25*np.pi/180.0", "the following disclaimer in the # documentation and/or other materials provided with the", "while theta <= theta_max: rho = rho_step * theta x = rho *", "on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if __name__ == '__main__': import sys import OpenGL.GLUT", "must reproduce the above copyright # notice, this list of conditions and the", "x, y): if key == '\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase -=", "SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "sys import OpenGL.GLUT as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support", "------------------------------------- def on_keyboard(key, x, y): if key == '\\033': sys.exit() # ------------------------------------- def", "glut.glutKeyboardFunc(on_keyboard) fps = 60 glut.glutTimerFunc(1000/fps, on_timer, fps) # Some init gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA", "* np.sin( theta ) P.append( (x,y) ) theta += chord / rho chord", "ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT", ") -> None', argNames=() ) text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text)", "-> None', argNames=() ) text = ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except:", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #", "2013 <NAME>. All rights reserved. 
# # Redistribution and use in source and", "the # documentation and/or other materials provided with the distribution. # # THIS", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT", "translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(.65,.65,.65,1), linewidth=lw, dash_pattern = 'dashed')", "as gl from transforms import ortho # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT |", "other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY", "rho_step while theta <= theta_max: rho = rho_step * theta x = rho", "rho * np.cos( theta ) y = rho * np.sin( theta ) P.append(", "list of conditions and the following disclaimer in the # documentation and/or other", "glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=() )", "== 'darwin': import ctypes from OpenGL import platform try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString',", "THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS", "gl from transforms import ortho # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)", "__name__ == '__main__': import sys import OpenGL.GLUT as glut from dash_lines_2D import DashLines", "on_keyboard(key, x, y): if key == '\\033': sys.exit() # ------------------------------------- def on_timer(fps): collection.dash_phase", "width, height) u_projection[...] 
= ortho(0,width,0,height,-1,1) # collection.scale = min(width, height) # ------------------------------------- def", "glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard)", "= rho * np.cos( theta ) y = rho * np.sin( theta )", "glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000) glut.glutCreateWindow(\"Dashed rotating spiral\") glut.glutDisplayFunc(on_display) glut.glutReshapeFunc(on_reshape) glut.glutKeyboardFunc(on_keyboard) fps = 60 glut.glutTimerFunc(1000/fps,", "/ rho chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid') collection.append(P,", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF", "gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32 ) u_view =", "dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI support for retina display # This requires", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "reserved. 
# # Redistribution and use in source and binary forms, with or", "# Redistribution and use in source and binary forms, with or without #", "min(width, height) # ------------------------------------- def on_keyboard(key, x, y): if key == '\\033': sys.exit()", "= np.eye(4).astype( np.float32 ) u_model = np.eye(4).astype( np.float32 ) collection = DashLines() lw", "platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None', argNames=() ) text =", "EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES", "ortho # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model'", "are # those of the authors and should not be interpreted as representing", "platform try: glutInitDisplayString = platform.createBaseFunction( 'glutInitDisplayString', dll=platform.GLUT, resultType=None, argTypes=[ctypes.c_char_p], doc='glutInitDisplayString( ) -> None',", "# those of the authors and should not be interpreted as representing official", "= 'solid') collection.append(P, translate=(x0,y0), color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid') collection.append(P, translate=(x0,y0), color=(.65,.65,.65,1), linewidth=lw,", "WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN", "on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model, 'u_view' :", "are permitted provided that the following conditions are met: # # 1. 
Redistributions", "gl.GL_ONE_MINUS_SRC_ALPHA ) gl.glDisable( gl.GL_DEPTH_TEST ) gl.glEnable( gl.GL_BLEND ) gl.glClearColor(1.0,1.0,1.0,1.0) u_projection = np.eye(4).astype( np.float32", "gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection, 'u_model' : u_model, 'u_view' : u_view})", "-*- # ----------------------------------------------------------------------------- # Copyright (C) 2013 <NAME>. All rights reserved. # #", "+= chord / rho chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2, dash_pattern =", "as np import OpenGL.GL as gl from transforms import ortho # ------------------------------------- def", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND", "the following disclaimer. # # 2. Redistributions in binary form must reproduce the", "TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL NICOLAS P.", "binary forms, with or without # modification, are permitted provided that the following", "glut.glutInit(sys.argv) # HiDPI support for retina display # This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/", "rho * np.sin( theta ) P.append( (x,y) ) theta += chord / rho", "# 2. Redistributions in binary form must reproduce the above copyright # notice,", "= ortho(0,width,0,height,-1,1) # collection.scale = min(width, height) # ------------------------------------- def on_keyboard(key, x, y):", "double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH) glut.glutInitWindowSize(1000, 1000)", "POSSIBILITY OF SUCH DAMAGE. 
# # The views and conclusions contained in the", "views and conclusions contained in the software and documentation are # those of", "# ------------------------------------- def on_timer(fps): collection.dash_phase -= 0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps)", "u_projection = np.eye(4).astype( np.float32 ) u_view = np.eye(4).astype( np.float32 ) u_model = np.eye(4).astype(", "np.eye(4).astype( np.float32 ) u_model = np.eye(4).astype( np.float32 ) collection = DashLines() lw =", "IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", "theta ) P.append( (x,y) ) theta += chord / rho chord += .05", "# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF #", "transforms import ortho # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection':", "Nicolas P. Rougier. # ----------------------------------------------------------------------------- import numpy as np import OpenGL.GL as gl", "BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF", "import ortho # ------------------------------------- def on_display(): gl.glClearColor(1,1,1,1) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) collection.draw(uniforms= {'u_projection': u_projection,", ") theta += chord / rho chord += .05 collection.append(P, translate=(x0,y0), color=(0,0,0,1), linewidth=lw+2,", "import sys import OpenGL.GLUT as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) # HiDPI", "0, width, height) u_projection[...] = ortho(0,width,0,height,-1,1) # collection.scale = min(width, height) # -------------------------------------", "LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "(C) 2013 <NAME>. All rights reserved. 
# # Redistribution and use in source", "-= 0.05 collection.rotate -= 0.25*np.pi/180.0 glut.glutTimerFunc(1000/fps, on_timer, fps) glut.glutPostRedisplay() # ------------------------------------- if __name__", "be interpreted as representing official # policies, either expressed or implied, of Nicolas", "OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "theta = 1 + chord / rho_step while theta <= theta_max: rho =", "# modification, are permitted provided that the following conditions are met: # #", "'__main__': import sys import OpenGL.GLUT as glut from dash_lines_2D import DashLines glut.glutInit(sys.argv) #", "source code must retain the above copyright notice, # this list of conditions", "copyright # notice, this list of conditions and the following disclaimer in the", "above copyright notice, # this list of conditions and the following disclaimer. #", "implied, of Nicolas P. Rougier. # ----------------------------------------------------------------------------- import numpy as np import OpenGL.GL", "= ctypes.c_char_p(\"rgba stencil double samples=8 hidpi\") glutInitDisplayString(text) except: pass glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB |" ]
[ "# Copyright (c) OpenMMLab. All rights reserved. import importlib import os import sys", "os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is not None:", "OpenMMLab. All rights reserved. import importlib import os import sys lib_dir = os.path.abspath(", "False if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper']", "Copyright (c) OpenMMLab. All rights reserved. import importlib import os import sys lib_dir", "importlib import os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available", "from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True def is_available() ->", "importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available =", "not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True def", "os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is not None: from", "import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if", "lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import SDKWrapper", "import os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available =", "reserved. 
import importlib import os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0,", "sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python')", "if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available", "import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True def is_available() -> bool: return", "rights reserved. import importlib import os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib'))", ".wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True def is_available() -> bool:", "_is_available = False if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import SDKWrapper __all__", "= os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is not", "import importlib import os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir)", "<reponame>aegis-rider/mmdeploy # Copyright (c) OpenMMLab. All rights reserved. import importlib import os import", "All rights reserved. 
import importlib import os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__),", "os import sys lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False", "SDKWrapper __all__ = ['SDKWrapper'] _is_available = True def is_available() -> bool: return _is_available", "sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import", "is not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True", "None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True def is_available()", "lib_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is", "(c) OpenMMLab. All rights reserved. import importlib import os import sys lib_dir =", "= False if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper import SDKWrapper __all__ =", "'../../../build/lib')) sys.path.insert(0, lib_dir) _is_available = False if importlib.util.find_spec('mmdeploy_python') is not None: from .wrapper" ]
[]
[ "# arguments of cb: this clientlist object, player number of changed client def", "self.nfs.start() super().start() def stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close()", "select(self.socks,[],[],1)[0] for sock in rdy: if sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128)", "packets_dump.dump_packet_raw(data) return # both packet types give us enough information to store the", "== self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def", "enough information to store the client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet)", "[\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps()) > 0: self.own_ip", "self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock]", "import packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting = 2, acquired = 3", "self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start() def stop(self):", "self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number", "vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while", "cb: this clientlist object, player_number, changed slot def set_media_change_callback(self, cb=None): self.cl.media_change_callback = cb", "\"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = 
\"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\"", "cb=None): self.cl.client_change_callback = cb # called when a player media changes # arguments", "parse beat packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if", "= \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\" self.beat_port = 50001 self.status_ip =", "socket import logging from threading import Thread from select import select from enum", "update of a known client is received # arguments of cb: this clientlist", "= vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if", "50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)", "except Exception as e: logging.warning(\"Failed to parse beat packet from {}, {} bytes:", "super().__init__() self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self)", "{}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return # both packet types give us enough information", "packets_dump.dump_status_packet(packet) # called whenever a keepalive packet is received # arguments of cb:", "= NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\" self.beat_port =", "is None and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not", "packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting = 2, acquired = 3 class", "self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr)", "the client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is 
None", "client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and", "socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip,", "self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr)) try: packet", "self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port", "#logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception as e:", "def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is", "packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive packet is received #", "\"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps()) > 0: self.own_ip =", "logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port))", "clientlist object, player number of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb", "= DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port =", "3 class ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj", "self.cl.gc() logging.debug(\"main loop finished\") 
def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr))", "bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet)", "number of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called whenever", "self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self,", "on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening", "elif sock == self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock ==", "player number of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called", "= self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data,", "2, acquired = 3 class ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self) self.data", "a player media changes # arguments of cb: this clientlist object, player_number, changed", "0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed own interface {}", "ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast", "from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return # both packet types", "{}, {} bytes: {}\".format(addr, len(data), e)) 
packets_dump.dump_packet_raw(data) return # both packet types give", "self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip", "def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb # called when a player media changes", "self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip", "received # arguments of cb: this clientlist object, player number of changed client", "self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port", "logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running", "cb: this clientlist object, player number of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback", "prodj.network import packets from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting", "def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number =", "from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse", "packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse beat packet from {}, {}", "rdy = select(self.socks,[],[],1)[0] for sock in rdy: if sock == self.keepalive_sock: data, addr", "addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception as", "on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running =", "class 
OwnIpStatus(Enum): notNeeded = 1, waiting = 2, acquired = 3 class ProDj(Thread):", "set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called whenever a status update of a", "addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock: data, addr = self.status_sock.recvfrom(256)", "packet from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to", "addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception as", "status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start()", "= None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port))", "None and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None:", "logging from threading import Thread from select import select from enum import Enum", "us enough information to store the client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]:", "rdy: if sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock", "super().start() def stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def", "Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\"", "self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to 
{}\".format(vcdj_player_number)) self.vcdj.player_number =", "run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in rdy:", "guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed own interface {} ip {} mask", "data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception", "{} ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr):", "import DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface from prodj.network import", "\"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded", "self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)", "{}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to parse keepalive", "data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock: data, addr =", "False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set", "self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif", "types give us enough information to store the client if packet[\"type\"] in [\"type_ip\",", "client is received # arguments of cb: this clientlist object, player number of", "logging.info(\"Player number set to 
{}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self):", "{}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse status", "addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data, addr = self.beat_sock.recvfrom(128)", "self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock: data, addr", "sock == self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\")", "prodj.core.vcdj import Vcdj from prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip", "import NfsClient from prodj.network.ip import guess_own_iface from prodj.network import packets from prodj.network import", "packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr):", "to parse beat packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return", "self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number", "# arguments of cb: this clientlist object, player_number, changed slot def set_media_change_callback(self, cb=None):", "from prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface from prodj.network import packets from", "1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) 
self.beat_sock = socket.socket(socket.AF_INET,", "import logging from threading import Thread from select import select from enum import", "packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start()", "set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start()", "give us enough information to store the client if packet[\"type\"] in [\"type_ip\", \"type_status\",", "vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number", "[\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr))", "Exception as e: logging.warning(\"Failed to parse keepalive packet from {}, {} bytes: {}\".format(addr,", "NfsClient from prodj.network.ip import guess_own_iface from prodj.network import packets from prodj.network import packets_dump", "data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception", "= socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive", "self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start() def", "vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def 
vcdj_set_iface(self): if self.own_ip is not", "= self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data,", "self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock", "self.handle_beat_packet(data, addr) elif sock == self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc()", "Exception as e: logging.warning(\"Failed to parse beat packet from {}, {} bytes: {}\".format(addr,", "media changes # arguments of cb: this clientlist object, player_number, changed slot def", "logging.warning(\"Failed to parse beat packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data)", "of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called whenever a", "from prodj.network import packets from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded = 1,", "from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to parse", "if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while self.keep_running:", "self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock", "{}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr)) try:", "cb # called whenever a status update of a known client is received", "changed client def set_client_change_callback(self, cb=None): 
self.cl.client_change_callback = cb # called when a player", "in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from", "= 50000 self.beat_ip = \"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port =", "handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except", "packet = packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to parse keepalive packet from", "{}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]:", "logging.warning(\"Failed to parse status packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data)", "vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def", "import Vcdj from prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip import", "stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5):", "def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data)", "except Exception as e: logging.warning(\"Failed to parse status packet from {}, {} bytes:", "socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock =", "a status update of a known client is 
received # arguments of cb:", "self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip,", "parse status packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet)", "not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0]", "notNeeded = 1, waiting = 2, acquired = 3 class ProDj(Thread): def __init__(self):", "{}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def", "try: packet = packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse status packet", "keepalive packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return # both", "= ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip =", "{} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return # both packet types give us", "None: logging.info(\"Guessed own interface {} ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet)", "= True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable()", "packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status", "{}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse beat", 
"cb # called when a player media changes # arguments of cb: this", "a keepalive packet is received # arguments of cb: this clientlist object, player", "None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for", "called whenever a keepalive packet is received # arguments of cb: this clientlist", "\"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try:", "self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if", "{} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat", "is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy =", "this clientlist object, player number of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback =", "OwnIpStatus(Enum): notNeeded = 1, waiting = 2, acquired = 3 class ProDj(Thread): def", "data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data,", "self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close()", "status packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet)", "prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting = 2, acquired =", "addr) elif sock == self.beat_sock: 
data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock", "self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number)) self.vcdj.player_number", "== self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock: data,", "from prodj.network.ip import guess_own_iface from prodj.network import packets from prodj.network import packets_dump class", "OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)", "a known client is received # arguments of cb: this clientlist object, player", "len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self,", "vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def", "self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop()", "packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on", "= select(self.socks,[],[],1)[0] for sock in rdy: if sock == self.keepalive_sock: data, addr =", "if self.own_ip is None and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip", "= 
OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,", "self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is not None:", "50000 self.beat_ip = \"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002", "self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to {}\".format(vcdj_player_number))", "self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock,", "store the client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is", "Thread from select import select from enum import Enum from prodj.core.clientlist import ClientList", "= cb # called whenever a status update of a known client is", "in rdy: if sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif", "beat packet from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed", "{}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip,", "= 1, waiting = 2, acquired = 3 class ProDj(Thread): def __init__(self): super().__init__()", "50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None", "elif sock == self.status_sock: data, addr = 
self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop", "1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET,", "= cb # called when a player media changes # arguments of cb:", "\"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps())", "from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse", "except Exception as e: logging.warning(\"Failed to parse keepalive packet from {}, {} bytes:", "{}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self):", "self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy", "as e: logging.warning(\"Failed to parse keepalive packet from {}, {} bytes: {}\".format(addr, len(data),", "= 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET,", "#self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self):", "== self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data,", "self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for", 
"from enum import Enum from prodj.core.clientlist import ClientList from prodj.core.vcdj import Vcdj from", "{}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive packet", "called whenever a status update of a known client is received # arguments", "status packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed", "len(data), e)) packets_dump.dump_packet_raw(data) return # both packet types give us enough information to", "ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\"", "set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb # called when a player media changes #", "packet = packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse beat packet from", "logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,", "#logging.debug(\"Broadcast beat packet from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception as e:", "= \"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def start(self):", "for sock in rdy: if sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data,", "self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data, addr", "prodj.core.clientlist import ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import DataProvider from prodj.network.nfsclient", "ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import 
DataProvider from prodj.network.nfsclient import NfsClient", "parse keepalive packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return #", "addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception as", "while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in rdy: if sock == self.keepalive_sock:", "self.cl.client_keepalive_callback = cb # called whenever a status update of a known client", "import guess_own_iface from prodj.network import packets from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded", "= socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks", "on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)", "self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player", "acquired = 3 class ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self) self.data =", "number of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb # called when", "packet = packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse status packet from", "self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr)", "self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed own interface {} ip", 
"self.cl.client_change_callback = cb # called when a player media changes # arguments of", "socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port))", "self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in rdy: if sock == self.keepalive_sock: data,", "{}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on", "own interface {} ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self,", "True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join()", "packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps()) >", "self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running = False", "= packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to parse keepalive packet from {},", "e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive packet is received", "addr) elif sock == self.status_sock: data, addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main", "try: packet = packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to parse keepalive packet", "self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 
self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for", "import Enum from prodj.core.clientlist import ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import", "socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip,", "e)) packets_dump.dump_packet_raw(data) return # both packet types give us enough information to store", "self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\" self.beat_port", "self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive packet is received # arguments of", "packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) #", "= Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip =", "for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start()", "self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def", "is not None: logging.info(\"Guessed own interface {} ip {} mask {} mac {}\".format(*self.own_ip))", "packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse status packet from {}, {}", "of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb # called when a", "packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return # both packet", "None def 
start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening", "self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{}", "Exception as e: logging.warning(\"Failed to parse status packet from {}, {} bytes: {}\".format(addr,", "socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock,", "handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except", "for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port))", "= \"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip =", "from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\",", "{} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet from", "try: packet = packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse beat packet", "packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in", "handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet 
from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except", "NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\" self.beat_port = 50001", "def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on", "e: logging.warning(\"Failed to parse status packet from {}, {} bytes: {}\".format(addr, len(data), e))", "cb: this clientlist object, player number of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback", "guess_own_iface from prodj.network import packets from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded =", "# called whenever a keepalive packet is received # arguments of cb: this", "self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port))", "logging.debug(\"starting main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in rdy: if", "if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps())", "= vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join()", "threading import Thread from select import select from enum import Enum from prodj.core.clientlist", "interface {} ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data,", "whenever a status update of a known client is received # arguments of", "changes # arguments of cb: this 
clientlist object, player_number, changed slot def set_media_change_callback(self,", "logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try:", "when a player media changes # arguments of cb: this clientlist object, player_number,", "for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{}", "self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive", "self.keep_running = True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running = False self.nfs.stop() self.data.stop()", "number set to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface()", "def __init__(self): super().__init__() self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs", "loop finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet", "logging.warning(\"Failed to parse keepalive packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data)", "e: logging.warning(\"Failed to parse keepalive packet from {}, {} bytes: {}\".format(addr, len(data), e))", "is received # arguments of cb: this clientlist object, player number of changed", "{}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever", "both packet types give us enough information to store the client if packet[\"type\"]", "def 
vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def", "Vcdj from prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface", "beat packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"]", "{} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet)", "self.own_ip is not None: logging.info(\"Guessed own interface {} ip {} mask {} mac", "# both packet types give us enough information to store the client if", "vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip", "start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{}", "finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet =", "self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main", "self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting", "packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to parse keepalive packet from {}, {}", "keepalive packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed", "of cb: this clientlist 
object, player number of changed client def set_client_change_callback(self, cb=None):", "whenever a keepalive packet is received # arguments of cb: this clientlist object,", "1, waiting = 2, acquired = 3 class ProDj(Thread): def __init__(self): super().__init__() self.cl", "= 2, acquired = 3 class ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self)", "import socket import logging from threading import Thread from select import select from", "= self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast", "to parse keepalive packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return", "ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self)", "to {}\".format(vcdj_player_number)) self.vcdj.player_number = vcdj_player_number #self.data.dbc.own_player_number = vcdj_player_number def vcdj_enable(self): self.vcdj_set_iface() self.vcdj.start() def", "known client is received # arguments of cb: this clientlist object, player number", "if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status", "packets from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting = 2,", "def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called whenever a status update of", "bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return # both packet types give us enough", "self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running = False self.nfs.stop()", "arguments of cb: this clientlist object, player number of changed 
client def set_client_change_callback(self,", "waiting = 2, acquired = 3 class ProDj(Thread): def __init__(self): super().__init__() self.cl =", "from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import NfsClient from", "def run(self): logging.debug(\"starting main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in", "= packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse status packet from {},", "arguments of cb: this clientlist object, player number of changed client def set_client_keepalive_callback(self,", "loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in rdy: if sock ==", "self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet", "= socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat", "def stop(self): self.keep_running = False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self,", "cb=None): self.cl.client_keepalive_callback = cb # called whenever a status update of a known", "keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening", "def vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self): logging.debug(\"starting main loop\")", "beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 
self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for", "e: logging.warning(\"Failed to parse beat packet from {}, {} bytes: {}\".format(addr, len(data), e))", "def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data)", "self.own_ip is None and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is", "{} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a", "= packets.BeatPacket.parse(data) except Exception as e: logging.warning(\"Failed to parse beat packet from {},", "self.beat_ip = \"0.0.0.0\" self.beat_port = 50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip", "arguments of cb: this clientlist object, player_number, changed slot def set_media_change_callback(self, cb=None): self.cl.media_change_callback", "def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data)", "main loop\") while self.keep_running: rdy = select(self.socks,[],[],1)[0] for sock in rdy: if sock", "[self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running =", "addr = self.status_sock.recvfrom(256) self.handle_status_packet(data, addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data, addr):", "packet types give us enough information to store the client if packet[\"type\"] in", "packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet =", "select from enum import Enum from prodj.core.clientlist import ClientList 
from prodj.core.vcdj import Vcdj", "return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast", "packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet", "{}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True", "= [self.keepalive_sock, self.beat_sock, self.status_sock] self.keep_running = True self.data.start() self.nfs.start() super().start() def stop(self): self.keep_running", "clientlist object, player number of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb", "self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock =", "enum import Enum from prodj.core.clientlist import ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider", "logging.info(\"Guessed own interface {} ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def", "to store the client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip", "\"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock", "import ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import", "addr) self.cl.gc() logging.debug(\"main loop finished\") def handle_keepalive_packet(self, data, addr): #logging.debug(\"Broadcast keepalive packet from", "information to store the client if packet[\"type\"] in [\"type_ip\", \"type_status\", \"type_change\"]: 
self.cl.eatKeepalive(packet) if", "not None: logging.info(\"Guessed own interface {} ip {} mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface()", "self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number set to", "packet is received # arguments of cb: this clientlist object, player number of", "from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called", "and len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed", "import select from enum import Enum from prodj.core.clientlist import ClientList from prodj.core.vcdj import", "data, addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr)) try: packet = packets.BeatPacket.parse(data) except Exception", "as e: logging.warning(\"Failed to parse status packet from {}, {} bytes: {}\".format(addr, len(data),", "return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive packet is received # arguments", "sock in rdy: if sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr)", "from select import select from enum import Enum from prodj.core.clientlist import ClientList from", "prodj.network.ip import guess_own_iface from prodj.network import packets from prodj.network import packets_dump class OwnIpStatus(Enum):", "data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock: data, addr =", "client def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb # called when a player media", "if sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) 
self.handle_keepalive_packet(data, addr) elif sock ==", "self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip = \"0.0.0.0\" self.beat_port = 50001 self.status_ip", "from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting = 2, acquired", "as e: logging.warning(\"Failed to parse beat packet from {}, {} bytes: {}\".format(addr, len(data),", "socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip, self.status_port)) self.socks =", "import Thread from select import select from enum import Enum from prodj.core.clientlist import", "bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive", "#logging.debug(\"Broadcast keepalive packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception as e:", "from prodj.core.clientlist import ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import DataProvider from", "object, player number of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb #", "packet from {}\".format(addr)) try: packet = packets.StatusPacket.parse(data) except Exception as e: logging.warning(\"Failed to", "self.vcdj.start() def vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4])", "object, player number of changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb #", "player number of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback = cb # called", "= guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed own interface {} ip {}", "from 
prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface from", "len(data), e)) packets_dump.dump_packet_raw(data) return self.cl.eatStatus(packet) packets_dump.dump_status_packet(packet) # called whenever a keepalive packet is", "called when a player media changes # arguments of cb: this clientlist object,", "player media changes # arguments of cb: this clientlist object, player_number, changed slot", "self.need_own_ip = OwnIpStatus.notNeeded self.own_ip = None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET,", "= 3 class ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self) self.data = DataProvider(self)", "DataProvider(self) self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000", "self.vcdj = Vcdj(self) self.nfs = NfsClient(self) self.keepalive_ip = \"0.0.0.0\" self.keepalive_port = 50000 self.beat_ip", "sock == self.keepalive_sock: data, addr = self.keepalive_sock.recvfrom(128) self.handle_keepalive_packet(data, addr) elif sock == self.beat_sock:", "select import select from enum import Enum from prodj.core.clientlist import ClientList from prodj.core.vcdj", "packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr)) try: packet =", "self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data, addr): #logging.debug(\"Broadcast status packet from {}\".format(addr)) try: packet", "of a known client is received # arguments of cb: this clientlist object,", "= 50001 self.status_ip = \"0.0.0.0\" self.status_port = 50002 self.need_own_ip = OwnIpStatus.notNeeded self.own_ip =", "return # both packet types give us enough information to store the client", "socket.SO_BROADCAST, 1) 
self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock =", "to parse status packet from {}, {} bytes: {}\".format(addr, len(data), e)) packets_dump.dump_packet_raw(data) return", "from threading import Thread from select import select from enum import Enum from", "> 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed own interface", "# called when a player media changes # arguments of cb: this clientlist", "in [\"type_ip\", \"type_status\", \"type_change\"]: self.cl.eatKeepalive(packet) if self.own_ip is None and len(self.cl.getClientIps()) > 0:", "socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port))", "class ProDj(Thread): def __init__(self): super().__init__() self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj =", "import packets from prodj.network import packets_dump class OwnIpStatus(Enum): notNeeded = 1, waiting =", "packet from {}\".format(addr)) try: packet = packets.KeepAlivePacket.parse(data) except Exception as e: logging.warning(\"Failed to", "mask {} mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet", "changed client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called whenever a status", "sock == self.beat_sock: data, addr = self.beat_sock.recvfrom(128) self.handle_beat_packet(data, addr) elif sock == self.status_sock:", "= False self.nfs.stop() self.data.stop() self.vcdj_disable() self.join() self.keepalive_sock.close() self.beat_sock.close() def vcdj_set_player_number(self, vcdj_player_number=5): logging.info(\"Player number", 
"self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.beat_sock.setsockopt(socket.SOL_SOCKET,", "keepalive packet is received # arguments of cb: this clientlist object, player number", "of cb: this clientlist object, player number of changed client def set_client_keepalive_callback(self, cb=None):", "mac {}\".format(*self.own_ip)) self.vcdj_set_iface() packets_dump.dump_keepalive_packet(packet) def handle_beat_packet(self, data, addr): #logging.debug(\"Broadcast beat packet from {}\".format(addr))", "self.beat_sock.bind((self.beat_ip, self.beat_port)) logging.info(\"Listening on {}:{} for beat packets\".format(self.beat_ip, self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)", "status update of a known client is received # arguments of cb: this", "this clientlist object, player number of changed client def set_client_change_callback(self, cb=None): self.cl.client_change_callback =", "len(self.cl.getClientIps()) > 0: self.own_ip = guess_own_iface(self.cl.getClientIps()) if self.own_ip is not None: logging.info(\"Guessed own", "vcdj_disable(self): self.vcdj.stop() self.vcdj.join() def vcdj_set_iface(self): if self.own_ip is not None: self.vcdj.set_interface_data(*self.own_ip[1:4]) def run(self):", "e)) packets_dump.dump_packet_raw(data) return if packet[\"type\"] in [\"type_beat\", \"type_mixer\"]: self.cl.eatBeat(packet) packets_dump.dump_beat_packet(packet) def handle_status_packet(self, data,", "self.own_ip = None def start(self): self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip,", "__init__(self): super().__init__() self.cl = ClientList(self) self.data = DataProvider(self) self.vcdj = Vcdj(self) self.nfs =", "of cb: this clientlist object, 
player_number, changed slot def set_media_change_callback(self, cb=None): self.cl.media_change_callback =", "self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port)) logging.info(\"Listening on {}:{} for keepalive packets\".format(self.keepalive_ip, self.keepalive_port)) self.beat_sock", "client def set_client_keepalive_callback(self, cb=None): self.cl.client_keepalive_callback = cb # called whenever a status update", "Enum from prodj.core.clientlist import ClientList from prodj.core.vcdj import Vcdj from prodj.data.dataprovider import DataProvider", "prodj.data.dataprovider import DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface from prodj.network", "if self.own_ip is not None: logging.info(\"Guessed own interface {} ip {} mask {}", "self.beat_port)) self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.status_sock.bind((self.status_ip, self.status_port)) logging.info(\"Listening on {}:{} for status packets\".format(self.status_ip,", "# called whenever a status update of a known client is received #", "prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface from prodj.network import packets from prodj.network", "DataProvider from prodj.network.nfsclient import NfsClient from prodj.network.ip import guess_own_iface from prodj.network import packets" ]
[ "momentum): '''Helpfull function that converts momentum to kinetic energy''' return math.sqrt(mass**2 + momentum**2)", "time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout:", "scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton", "system created on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers):", "MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose 0 # to", "2 + y_loc # print(f'y location of sensor {n} is: {y_loc}') # create", "dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull': color = 'yellow'", "= 250, distance_to_system = 1, system_thickness = 1, n_sensors = 1, sensor_pitch =", "strftime class MacroWriter: \"\"\"Main class for creating a macro file to be run", "top_surface of the system # print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness / 2", "START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose", "sensor In the current implementation sensor is a flat plane perpendicular to the", "to Skull for the first and the last layer elif layer == 1", "+= item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f:", "self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if 
not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\"", "f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def Ek(mass, momentum): '''Helpfull", "/gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation", "self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False self.no_system = True def", "file and mark it with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name", "#/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as", "phantom_material = 'Skull' # layers start from 10 and extend in the negative", "a sensor In the current implementation sensor is a flat plane perpendicular to", "/gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines", "= physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict = {}", "for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion", "stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def Ek(mass, momentum): '''Helpfull function that converts", "mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm", "mm /gate/scanner/setMaterial Air 
/gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def", "DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if", "LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add lines from phantom and sensors for", "\"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for the layer{n} #*************************************************************", "#/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self): \"\"\"creates the", "box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation", "/gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose 0 # to check Steplimiter #/tracking/verbose", "instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom layers", "log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) #", "= logs_folder self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors',", "for creating a phantom box. 
which in the current implementation is a flat", "of the system # print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness / 2 -", "/gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n}", "macro_folder self.results = results_folder self.logs = logs_folder self.sensor_material = sensor_material self.physics_list = physics_list", "layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1,", "{self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD", "/gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc}", "# /gate/physics/processList Available # /gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name", "= path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag", "self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== #", "= f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} 
#===================================================== # PHYSICS #===================================================== /control/execute", "\"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder", "self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then add sensor lines geometry_lines", "\"\"\"runs macro file the log file is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path", "if not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute", "= 'yellow' geometry_lines = f\"\"\" #phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength", "In the current implementation sensor is a flat plane perpendicular to the beam", "self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self): results_path =", "in self.physics_dict.values(): physics_lines += item # write to file and mark it with", "'Skull' # layers start from 10 and extend in the negative y direction", "z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for creating a sensor In the current", "= f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values(): actor_lines", "in y direction all dimensions are in cm two actors are currently added", "macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def", "'yellow' geometry_lines = f\"\"\" #phatom box {n} 
/gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length}", "point from the center to the top_surface of the system # print(f'system thickness:", "/gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self,", "current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f:", "OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w')", "/gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines) return", "{y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#*************************************************************", "0: phantom_material = 'Air' # set material to Skull for the first and", "def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable", "layer is added to get parameters at the entrance to the real phantom", "y_loc=y_loc, material=phantom_material) # create system with sensors and readout chips for i_sensor in", "perpendicular to beam which is in y direction all dimensions are in cm", "- distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc = 10 for layer in", "15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000", "mrad 
/gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1", "Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name", "open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path +", "/gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose", "= 1, system_thickness = 1, n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5):", "3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad", "/control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute", "create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm", "def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== # START BEAMS 
#===================================================== # JamesRandom", "'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict = {} self.actor_dict =", "f''' /gate/run/initialize # Enable the following lines to display available and enabled processes", "# initialize an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) #", "{thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible", "#===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name", "item in self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with", "box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached", "in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'):", "direction all dimensions are in cm two actors are currently added to this", "self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull': color = 'yellow' geometry_lines = f\"\"\" #phatom", "z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a GATE macro for", "#===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam 
/gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy", "#===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 #", "= f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0", "1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts", "# OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name,", "1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and system geometries\"\"\"", "a GATE macro for creating a sensor In the current implementation sensor is", "self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines", "MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc =", "Air layer is added to get parameters at the entrance to the real", "# then add sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box", "{results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return", 
"my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log file is", "creating a sensor In the current implementation sensor is a flat plane perpendicular", "box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save", "output: name the macro file, a dictionary containing the list of root file", "{energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm", "my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc = 10", "self.logs = logs_folder self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics',", "NOT EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified). 
__all__ = ['MacroWriter', 'create_all',", "added to this volume PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path", "'Skull': color = 'yellow' geometry_lines = f\"\"\" #phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert", "10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}']", "3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis", "actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as", "layer if layer == 0: phantom_material = 'Air' # set material to Skull", "center to the top_surface of the system # print(f'system thickness: {self.system_thickness}') y_loc =", "/gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n}", "the system created on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for layer in", "'w') as f: f.write(lines) return macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers =", "the last layer elif layer == 1 or layer == n_phantom_layers - 1:", "create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines = f''' #=====================================================", "get parameters at the entrance to the real phantom layer if layer ==", "'actors', 
'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict = {} self.actor_dict = {}", "def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results", "dimensions are in cm two actors are currently added to this volume PhaseSpaceActor", "self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0,", "Cell import math def Ek(mass, momentum): '''Helpfull function that converts momentum to kinetic", "self.system_y_loc = system_y_loc self.system_thickness = system_thickness self.results_files = {key:[] for key in ['trackers',", "# print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness / 2 - thickness / 2", "self.no_system: # print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness", "file, a dictionary containing the list of root file dictionary keys are 'trackers',", "range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor roc_loc = sensor_loc - sensor_thickness/2", "the beam the beam is along y direction all dimensions are in mm\"\"\"", "# /gate/random/verbose 1 # /gate/source/verbose 0 # to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput", "\"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines", "self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w')", "mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm 
/gate/physics/SetMaxStepSizeInRegion world 1 mm", "90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}',", "creating a phantom box. which in the current implementation is a flat box", "f: f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000", "y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro", "open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def Ek(mass,", "a flat plane perpendicular to the beam the beam is along y direction", "= {} self.system_y_loc = system_y_loc self.system_thickness = system_thickness self.results_files = {key:[] for key", "add sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length}", "be run by `gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon',", "/gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm", "phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) *", "= macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines =", "to get parameters at the entrance to the real phantom layer if layer", "1, system_thickness = 1, n_sensors = 1, sensor_pitch = 
1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets", "{x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm", "= 1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and system", "for key in ['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created", "#sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length}", "for the first and the last layer elif layer == 1 or layer", "/gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']}", "= sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) * n_sensors # initialize an instance", "negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create system", "write to file and mark it with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics']", "self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion", "true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self):", "# ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} 
#===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== #", "f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000):", "# create phantom layers y_loc = 10 for layer in range(n_phantom_layers): phantom_material =", "roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy)", "{self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute", "time import strftime class MacroWriter: \"\"\"Main class for creating a macro file to", "= macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self): lines = f'''", "= f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines) return macro_name, self.results_files, self.timestamp #", "actors are currently added to this volume PhaseSpaceActor and DoseActor ''' tracker_results_path =", "for layer in range(n_phantom_layers): phantom_material = 'Water' # the parameters of the particles", "physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200,", "def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): 
\"\"\"Compose a GATE", "range(n_phantom_layers): phantom_material = 'Water' # the parameters of the particles are recorded at", "`gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro =", "to edit: nbs/00_macrotools.ipynb (unless otherwise specified). __all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] #", "macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material", "lines = f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #=====================================================", "system='scanner'): \"\"\"Compose a GATE macro for creating a sensor In the current implementation", "'start_beam']) self.geometry_dict = {} self.physics_dict = {} self.actor_dict = {} self.system_y_loc = system_y_loc", "self.beam_created = False self.no_system = True def print_info(self): print(f'Info for the system created", "macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def", "'Ek'] # Cell import subprocess from os import path from time import strftime", "y direction all dimensions are in mm\"\"\" # move starting point from the", "geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) * n_sensors # initialize", "macro file the log file is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path =", "lines to display available and enabled processes # /gate/physics/processList Available # /gate/physics/processList Enabled", 
"{material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence)", "system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results = results_folder self.logs = logs_folder", "system with sensors and readout chips for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch", "0 self.beam_created = False self.no_system = True def print_info(self): print(f'Info for the system", "true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo", "mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm", "'w') as f: f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize # Enable the", "== 0: phantom_material = 'Air' # set material to Skull for the first", "= False # then add sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n}", "layers start from 10 and extend in the negative y direction y_loc -=", "as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def Ek(mass, momentum):", "n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for", "layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1", "self.macro_dict['physics'] = 
macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def create_geometry(self): geometry_lines =", "as f: f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world", "create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro", "mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1", "#===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================", "and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path)", "results_folder self.logs = logs_folder self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry',", "file the log file is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder,", "/gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation", "from the layer # the 
Air layer is added to get parameters at", "to file and mark it with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] =", "0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self): \"\"\"creates the main", "mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics", "/gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor", "= results_folder self.logs = logs_folder self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict =", "y direction all dimensions are in cm two actors are currently added to", "if material == 'Skull': color = 'yellow' geometry_lines = f\"\"\" #phatom box {n}", "volume PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt')", "only once geometry_lines = '' if self.no_system: # print(f'system created with: thickness: {self.system_thickness}", "{position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad", "thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def", "/ 2 + y_loc # print(f'y location of sensor {n} is: {y_loc}') #", "path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) 
self.results_files['dose'].append(dose_results_path) if material == 'Skull': color = 'yellow' geometry_lines =", "parameters for phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness +", "system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) * n_sensors #", "as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== # START BEAMS", "f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines", "/gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm", "= actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion", "EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified). __all__ = ['MacroWriter', 'create_all', 'run_macro',", "Compose a GATE macro for creating a phantom box. 
which in the current", "of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc", "/gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor to the", "from os import path from time import strftime class MacroWriter: \"\"\"Main class for", "#================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #=====================================================", "__init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results =", "'w') as f: f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World", "tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true", "f: f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize # Enable the following lines", "with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #=====================================================", "START BEAMS #===================================================== /control/execute 
{self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit", "#/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] =", "- 1: phantom_material = 'Skull' # layers start from 10 and extend in", "phantom_material = 'Water' # the parameters of the particles are recorded at the", "self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines", "physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict", "and extend in the negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc,", "system_thickness = (sensor_thickness + sensor_pitch) * n_sensors # initialize an instance of MacroWriter", "os import path from time import strftime class MacroWriter: \"\"\"Main class for creating", "f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull': color =", "0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1", "# BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy}", "# PHYSICS 
#===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== #", "# World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air", "not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]}", "0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines =", "the exit from the layer # the Air layer is added to get", "of phantom layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200,", "is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as", "{z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color}", "/gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2", "with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize #", "beam is along y direction all dimensions are in mm\"\"\" # move starting", "dictionary containing the list of root file dictionary keys are 'trackers', 'hits', 
'dose'", "system_thickness = 1, n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters", "# print(f'y location of sensor {n} is: {y_loc}') # create system fisrt but", "= dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict = {}", "{x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines =", "proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess", "initialize an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create", "{color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for the layer{n}", "open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== #", "= f\"\"\" #===================================================== # START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister", "last layer elif layer == 1 or layer == n_phantom_layers - 1: phantom_material", "starting point from the center to the top_surface of the system # print(f'system", "x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a GATE macro for creating a", "as f: f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize # Enable the following", "/gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air 
/gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed", "{self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]}", "self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY", "for item in self.physics_dict.values(): physics_lines += item # write to file and mark", "{sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc,", "flat box perpendicular to beam which is in y direction all dimensions are", "/gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out", "mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc}", "lines from phantom and sensors for item in self.physics_dict.values(): physics_lines += item #", "# GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ #", 
"creating a macro file to be run by `gate` \"\"\" def __init__(self, macro_folder='../mac',", "#phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm", "f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines", "/gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm", "mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle", "/control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name =", "= (sensor_thickness + sensor_pitch) * n_sensors # initialize an instance of MacroWriter my_macro", "+= item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f:", "{y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines =", "= 'Air' # set material to Skull for the first and the last", "f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values(): actor_lines +=", "#===================================================== # INITIALISATION #===================================================== /control/execute 
{self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #=====================================================", "/gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\"", "as f: f.write(lines) return macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers = 21,", "AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified). __all__ =", "0.1 mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor to the box{n} #***************************************************************", "../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial", "def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item", "actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values():", "0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name", "lines = f\"\"\" #===================================================== # START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName", "self.results_files, self.timestamp # Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material =", 
"import math def Ek(mass, momentum): '''Helpfull function that converts momentum to kinetic energy'''", "y_loc # print(f'y location of sensor {n} is: {y_loc}') # create system fisrt", "Ek(mass, momentum): '''Helpfull function that converts momentum to kinetic energy''' return math.sqrt(mass**2 +", "1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1", "macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor", "actor_lines = f\"\"\"#************************************************************* # attached actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n}", "and readout chips for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) *", "world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner", "range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose", "= physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines =", "for phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch)", "/gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines", "/gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def", 
"self.results_files['dose'].append(dose_results_path) if material == 'Skull': color = 'yellow' geometry_lines = f\"\"\" #phatom box", "is: {y_loc}') # create system fisrt but only once geometry_lines = '' if", "/gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for item in self.geometry_dict.values(): geometry_lines += item", "macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\"", "add lines from phantom and sensors for item in self.physics_dict.values(): physics_lines += item", "15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0", "/gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self): \"\"\"creates the main macro", "\"\"\"Compose a GATE macro for creating a sensor In the current implementation sensor", "for the system created on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for layer", "= self.system_thickness / 2 - thickness / 2 + y_loc # print(f'y location", "for layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0,", "mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor to the box{n} #*************************************************************** /gate/actor/addActor", "#/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def", "in self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}',", "- 
sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness)", "/gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as", "'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam()", "{roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return", "z_loc=0, material='Water', color='blue'): ''' Compose a GATE macro for creating a phantom box.", "mm /gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha", "#===================================================== # START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456", "Air ''' for item in self.geometry_dict.values(): geometry_lines += item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry']", "path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1", "/gate/run/initialize # Enable the following lines to display available and enabled processes #", "/gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines = 
f\"\"\"#************************************************************* #", "sensor {n} is: {y_loc}') # create system fisrt but only once geometry_lines =", "/gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true", "= geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList", "the Air layer is added to get parameters at the entrance to the", "implementation sensor is a flat plane perpendicular to the beam the beam is", "'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict = {} self.actor_dict = {} self.system_y_loc =", "following lines to display available and enabled processes # /gate/physics/processList Available # /gate/physics/processList", "21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy = 250, distance_to_system = 1,", "sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines", "/gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits", "f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines) return macro_name, self.results_files, self.timestamp # Cell", "sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell", "log file is time stamped\"\"\" log_file_name = 
f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with", "item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines)", "{self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac'", "false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType", "box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true", "/gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add lines from", "box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines", "self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0", "elif layer == 1 or layer == n_phantom_layers - 1: phantom_material = 'Skull'", "geometry_lines += item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as", "distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc = 10 for layer in range(n_phantom_layers):", "{} self.system_y_loc = system_y_loc 
self.system_thickness = system_thickness self.results_files = {key:[] for key in", "-(sensor_pitch + sensor_thickness) * i_sensor roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor", "# write to file and mark it with current timestamp macro_name = f'physics{self.timestamp}.mac'", "self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm", "random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}']", "open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize # Enable", "= sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc=", "#************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm", "f\"\"\"#************************************************************* # attached actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path}", "macro for creating a phantom box. 
which in the current implementation is a", "layer in range(n_phantom_layers): phantom_material = 'Water' # the parameters of the particles are", "self.macro = macro_folder self.results = results_folder self.logs = logs_folder self.sensor_material = sensor_material self.physics_list", "'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo", "DO NOT EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified). __all__ = ['MacroWriter',", "direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create system with sensors", "f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV", "out def create_macro_file(self): \"\"\"creates the main macro file output: name the macro file,", "= 1, n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for", "chips for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor roc_loc", "box perpendicular to beam which is in y direction all dimensions are in", "f.write(lines) return macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness =", "self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then add sensor lines geometry_lines += f\"\"\"", "mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess", "layer == n_phantom_layers - 1: 
phantom_material = 'Skull' # layers start from 10", "geometry_lines = f\"\"\" #phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm", "alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add lines", "/control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #===================================================== /control/execute", "lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength", "/gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines", "/gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15", "= path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull': color = 'yellow' geometry_lines", "/gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc}", "sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor roc_loc = 
sensor_loc - sensor_thickness/2 -", "of root file dictionary keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors()", "self.n_phantom_layers = 0 self.beam_created = False self.no_system = True def print_info(self): print(f'Info for", "as f: f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out", "or layer == n_phantom_layers - 1: phantom_material = 'Skull' # layers start from", "# move starting point from the center to the top_surface of the system", "plane perpendicular to the beam the beam is along y direction all dimensions", "phantom layers y_loc = 10 for layer in range(n_phantom_layers): phantom_material = 'Water' #", "== 'Skull': color = 'yellow' geometry_lines = f\"\"\" #phatom box {n} /gate/world/daughters/name box{n}", "current implementation is a flat box perpendicular to beam which is in y", "{z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines", "the following lines to display available and enabled processes # /gate/physics/processList Available #", "macro file, a dictionary containing the list of root file dictionary keys are", "/gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2", "/gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add", "y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create system with", "in ['trackers', 'hits', 
'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False", "/gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm", "in self.geometry_dict.values(): geometry_lines += item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}',", "path from time import strftime class MacroWriter: \"\"\"Main class for creating a macro", "physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion", "# Enable the following lines to display available and enabled processes # /gate/physics/processList", "to display available and enabled processes # /gate/physics/processList Available # /gate/physics/processList Enabled '''", "sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) * n_sensors # initialize an instance of", "10 and extend in the negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer],", "timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines)", "true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines", "sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc,", "{x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm 
/gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc}", "self.system_thickness = system_thickness self.results_files = {key:[] for key in ['trackers', 'hits', 'dose']} self.timestamp", "/gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air", "keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not", "= f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self):", "box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc}", "material='Water', color='blue'): ''' Compose a GATE macro for creating a phantom box. which", "loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then", "GATE macro for creating a phantom box. 
which in the current implementation is", "which in the current implementation is a flat box perpendicular to beam which", "system fisrt but only once geometry_lines = '' if self.no_system: # print(f'system created", "are recorded at the exit from the layer # the Air layer is", "at the entrance to the real phantom layer if layer == 0: phantom_material", "/gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1", "box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc}", "{position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3", "''' for item in self.geometry_dict.values(): geometry_lines += item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] =", "# add lines from phantom and sensors for item in self.physics_dict.values(): physics_lines +=", "macro file output: name the macro file, a dictionary containing the list of", "/gate/physics/addProcess HadronIonisation proton ''' # add lines from phantom and sensors for item", "= 10 for layer in range(n_phantom_layers): phantom_material = 'Water' # the parameters of", "material to Skull for the first and the last layer elif layer ==", "my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname,", "def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength", "for item in self.geometry_dict.values(): geometry_lines += item 
macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name", "/control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute", "are currently added to this volume PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results,", "1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def", "macro file to be run by `gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs',", "are in mm\"\"\" # move starting point from the center to the top_surface", "exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines) return macro_name,", "set material to Skull for the first and the last layer elif layer", "/gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] =", "Skull for the first and the last layer elif layer == 1 or", "f\"\"\" #===================================================== # START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed", "n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and", "which is in y direction all dimensions are in cm two actors are", "/gate/physics/Positron/SetCutInRegion scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1 
mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter", "n_sensors # initialize an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness)", "{z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\"", "+= item # write to file and mark it with current timestamp macro_name", "100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs", "y_loc = self.system_thickness / 2 - thickness / 2 + y_loc # print(f'y", "/gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter", "geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)}", "to this volume PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path =", "dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict = {} self.actor_dict", "and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) * n_sensors", "self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines =", "phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, 
material=phantom_material) # create system with sensors and readout chips", "macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def", "mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for item in", "with open(macro_name, 'w') as f: f.write(lines) return macro_name, self.results_files, self.timestamp # Cell def", "along y direction all dimensions are in mm\"\"\" # move starting point from", "geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'):", "/gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\"", "check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] =", "class for creating a macro file to be run by `gate` \"\"\" def", "/gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc}", "mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1", "\"\"\"sets parameters for phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness", "an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom", "i_sensor in 
range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor roc_loc = sensor_loc", "macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize", "and mark it with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with", "z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength", "log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math", "sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and system geometries\"\"\" phantom_thickness", "macro for creating a sensor In the current implementation sensor is a flat", "a phantom box. which in the current implementation is a flat box perpendicular", "two actors are currently added to this volume PhaseSpaceActor and DoseActor ''' tracker_results_path", "distance_to_system = 1, system_thickness = 1, n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5,", "Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy =", "def create_macro_file(self): \"\"\"creates the main macro file output: name the macro file, a", "= 0 self.beam_created = False self.no_system = True def print_info(self): print(f'Info for the", "= 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy = 250, distance_to_system =", "world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner", "in range(n_phantom_layers): phantom_material = 'Water' # the parameters of the particles are recorded", "processes # /gate/physics/processList Available # 
/gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] =", "<gh_stars>0 # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified).", "the parameters of the particles are recorded at the exit from the layer", "the beam is along y direction all dimensions are in mm\"\"\" # move", "= macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines = f'''", "otherwise specified). __all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import subprocess from", "self.no_system = True def print_info(self): print(f'Info for the system created on {self.timestamp}...') print(f'Number", "to the top_surface of the system # print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness", "= True lines = f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType", "/gate/world/setMaterial Air ''' for item in self.geometry_dict.values(): geometry_lines += item macro_name = f'geometry{self.timestamp}.mac'", "/gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values(): actor_lines += item", "MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose 0 # to check Steplimiter", "self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #=====================================================", "* n_sensors # initialize an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system,", "mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm 
/gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron", "phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy = 250, distance_to_system = 1, system_thickness", "'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False self.no_system = True", "start from 10 and extend in the negative y direction y_loc -= phantom_layer_thickness[layer]", "thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the", "= sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam'])", "at the exit from the layer # the Air layer is added to", "/gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor", "self.results = results_folder self.logs = logs_folder self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict", "= macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self): results_path = path.join(self.results,", "implementation is a flat box perpendicular to beam which is in y direction", "layers y_loc = 10 for layer in range(n_phantom_layers): phantom_material = 'Water' # the", "create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in", "0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] =", "layer # the Air layer is added to get parameters at the entrance", "= 
f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self,", "but only once geometry_lines = '' if self.no_system: # print(f'system created with: thickness:", "cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0,", "to be run by `gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0,", "{round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible", "mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for item in self.geometry_dict.values(): geometry_lines +=", "{self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0,", "\"\"\"Main class for creating a macro file to be run by `gate` \"\"\"", "z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box", "specified). 
__all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import subprocess from os", "beam which is in y direction all dimensions are in cm two actors", "the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n}", "negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name =", "exit from the layer # the Air layer is added to get parameters", "dimensions are in mm\"\"\" # move starting point from the center to the", "self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== #", "mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm", "= f\"\"\"#************************************************************* # attached actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save", "system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): '''", "dashed ''' return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0,", "self.system_thickness / 2 - thickness / 2 + y_loc # print(f'y location of", "macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines) return macro_name, self.results_files, self.timestamp", "sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): 
self.macro = macro_folder self.results = results_folder self.logs = logs_folder self.sensor_material =", "actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag", "== 1 or layer == n_phantom_layers - 1: phantom_material = 'Skull' # layers", "print(f'Info for the system created on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for", "#===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS", "2 - thickness / 2 + y_loc # print(f'y location of sensor {n}", "/gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm", "return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'):", "self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY #=====================================================", "thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system =", "#===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV 
/gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition", "return out def create_macro_file(self): \"\"\"creates the main macro file output: name the macro", "#===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #=====================================================", "first and the last layer elif layer == 1 or layer == n_phantom_layers", "in the negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) #", "= macro_folder self.results = results_folder self.logs = logs_folder self.sensor_material = sensor_material self.physics_list =", "physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines = f'''", "of the particles are recorded at the exit from the layer # the", "/gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n}", "{system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation", "to the beam the beam is along y direction all dimensions are in", "to beam which is in y direction all dimensions are in cm two", "#/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = 
f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w')", "/gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1", "'Water' # the parameters of the particles are recorded at the exit from", "# /gate/source/verbose 0 # to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\"", "'y':250, 'z':0}): self.beam_created = True lines = f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource", "of sensor {n} is: {y_loc}') # create system fisrt but only once geometry_lines", "# INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== #", "roc_thickness=0.5): \"\"\"sets parameters for phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) system_thickness =", "created on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print()", "/gate/random/verbose 1 # /gate/source/verbose 0 # to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries", "physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion", "f\"\"\" #===================================================== # GEOMETRY 
#===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]}", "2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative", "/gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared", "/gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton '''", "mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad", "GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS", "# create system with sensors and readout chips for i_sensor in range(n_sensors): sensor_loc", "= strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False self.no_system = True def print_info(self):", "system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results = results_folder self.logs = logs_folder self.sensor_material", "JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName 
MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose 0", "macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}')", "x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for creating", "color = 'yellow' geometry_lines = f\"\"\" #phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box", "import strftime class MacroWriter: \"\"\"Main class for creating a macro file to be", "the real phantom layer if layer == 0: phantom_material = 'Air' # set", "= MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc = 10 for", "class MacroWriter: \"\"\"Main class for creating a macro file to be run by", "scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton", "PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles", "box. 
which in the current implementation is a flat box perpendicular to beam", "print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness,", "magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}']", "mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance", "0 \"\"\" return out def create_macro_file(self): \"\"\"creates the main macro file output: name", "# BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #===============================================", "at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False #", "1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam']", "Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose 0 #", "= 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and system geometries\"\"\" phantom_thickness =", "- thickness / 2 + y_loc # print(f'y location of sensor {n} is:", "\"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self,", "= f\"\"\" \"\"\" 
self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200,", "# attached actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo", "sensors and readout chips for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness)", "added to get parameters at the entrance to the real phantom layer if", "macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def Ek(mass, momentum): '''Helpfull function that", "created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc)", "y_loc=self.system_y_loc) self.no_system = False # then add sensor lines geometry_lines += f\"\"\" #sensor", "x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for creating a sensor In", "/gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor", "/gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter", "and enabled processes # /gate/physics/processList Available # /gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac'", "run by `gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'):", "once geometry_lines = '' if self.no_system: # print(f'system created with: thickness: 
{self.system_thickness} at", "1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for item in self.geometry_dict.values(): geometry_lines", "box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false", "/control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #=====================================================", "layer == 0: phantom_material = 'Air' # set material to Skull for the", "log_folder='../logs'): \"\"\"runs macro file the log file is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log'", "layer == 1 or layer == n_phantom_layers - 1: phantom_material = 'Skull' #", "enabled processes # /gate/physics/processList Available # /gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation']", "'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams()", "create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy = 250, distance_to_system", "if layer == 0: phantom_material = 'Air' # set material to Skull for", "print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a", "#===================================================== # BEAMS 
#===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]}", "{z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* #", "actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world", "thickness / 2 + y_loc # print(f'y location of sensor {n} is: {y_loc}')", "dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true", "Available # /gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}',", "roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100,", "cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm", "True def print_info(self): print(f'Info for the system created on {self.timestamp}...') print(f'Number of phantom", "the top_surface of the system # print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness /", "physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results = results_folder self.logs = logs_folder self.sensor_material = sensor_material", "(sensor_thickness + sensor_pitch) * n_sensors # 
initialize an instance of MacroWriter my_macro =", "'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict = {} self.actor_dict = {} self.system_y_loc", "subprocess from os import path from time import strftime class MacroWriter: \"\"\"Main class", "+ y_loc # print(f'y location of sensor {n} is: {y_loc}') # create system", "= f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout,", "'create_all', 'run_macro', 'Ek'] # Cell import subprocess from os import path from time", "the system # print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness / 2 - thickness", "= path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull':", "n_primaries=10000): lines = f\"\"\" #===================================================== # START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister", "from time import strftime class MacroWriter: \"\"\"Main class for creating a macro file", "path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import", "/gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self, n=0, x_length=200,", "{z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan", "ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values(): 
actor_lines += item macro_name", "{sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm", "/gate/physics/processList Available # /gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with", "self.geometry_dict = {} self.physics_dict = {} self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness", "'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams() lines", "return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log file", "subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def Ek(mass, momentum): '''Helpfull function", "beam_energy = 250, distance_to_system = 1, system_thickness = 1, n_sensors = 1, sensor_pitch", "0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter", "the log file is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name)", "layer elif layer == 1 or layer == n_phantom_layers - 1: phantom_material =", "/gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0", "f\"\"\" #phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness}", "'hits', 'dose']} self.timestamp = 
strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False self.no_system =", "{self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then add", "with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system", "''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines)", "/control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS", "Enable the following lines to display available and enabled processes # /gate/physics/processList Available", "root file dictionary keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry()", "in cm two actors are currently added to this volume PhaseSpaceActor and DoseActor", "world ''' for item in self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors']", "f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self, energy=250,", "mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 '''", "self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict =", "thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): 
system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength", "recorded at the exit from the layer # the Air layer is added", "f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull': color = 'yellow' geometry_lines = f\"\"\"", "create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a", "\"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor", "geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then add sensor", "and sensors for item in self.physics_dict.values(): physics_lines += item # write to file", "open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db #", "/gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0", "cm two actors are currently added to this volume PhaseSpaceActor and DoseActor '''", "self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines =", "{} self.physics_dict = {} self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness = system_thickness", "mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material} /gate/box{n}/vis/setVisible 1 
/gate/box{n}/vis/setColor {color} \"\"\"", "self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam',", "BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== #", "''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines)", "phantom and sensors for item in self.physics_dict.values(): physics_lines += item # write to", "print(f'y location of sensor {n} is: {y_loc}') # create system fisrt but only", "/gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self):", "f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1", "= f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy}", "['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import subprocess from os import path from", "MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system, system_thickness=system_thickness) # create phantom layers y_loc = 10 for layer", "{n} is: {y_loc}') # create system fisrt but only once geometry_lines = ''", "deuteron /gate/physics/ActivateStepLimiter triton 
/gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton", "with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as", "macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase", "['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False self.no_system", "MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY", "world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon", "triton /gate/physics/ActivateStepLimiter alpha /gate/physics/ActivateStepLimiter GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' #", "1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}',", "= f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world", "is a flat box perpendicular to beam which is in y direction all", "sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness) 
system_thickness", "self.beam_created = True lines = f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam", "''' # add lines from phantom and sensors for item in self.physics_dict.values(): physics_lines", "sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial", "/gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative", "self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS", "'run_macro', 'Ek'] # Cell import subprocess from os import path from time import", "\"\"\" return out def create_macro_file(self): \"\"\"creates the main macro file output: name the", "from 10 and extend in the negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer,", "/gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] =", "/gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with", "False self.no_system = True def print_info(self): print(f'Info for the system created on {self.timestamp}...')", "file to be run by `gate` \"\"\" def __init__(self, macro_folder='../mac', 
results_folder='../results', logs_folder='../logs', system_thickness=1,", "macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250,", "\"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created: self.create_beam() self.create_start_beams() lines =", "#===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()}", "1: phantom_material = 'Skull' # layers start from 10 and extend in the", "f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1", "PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']}", "= False self.no_system = True def print_info(self): print(f'Info for the system created on", "in the current implementation is a flat box perpendicular to beam which is", "a flat box perpendicular to beam which is in y direction all dimensions", "by `gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro", "currently added to this volume PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root')", "item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with 
open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines)", "self.timestamp # Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water',", "250, distance_to_system = 1, system_thickness = 1, n_sensors = 1, sensor_pitch = 1,", "the center to the top_surface of the system # print(f'system thickness: {self.system_thickness}') y_loc", "from phantom and sensors for item in self.physics_dict.values(): physics_lines += item # write", "phantom layer if layer == 0: phantom_material = 'Air' # set material to", "/gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc}", "beam the beam is along y direction all dimensions are in mm\"\"\" #", "f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength", "/gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac'", "the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess", "Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log file is time stamped\"\"\"", "mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial", "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified). 
__all__", "print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness / 2 - thickness / 2 +", "/gate/box{n}/vis/setVisible 1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence) cuts", "f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self): lines", "{self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False", "= path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell", "then add sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength", "as f: f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength", "/gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm /gate/physics/SetMaxStepSizeInRegion", "#*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame", "for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor roc_loc =", "{position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad 
/gate/source/PBS/setSigmaPhi", "= {} self.physics_dict = {} self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness =", "{x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed '''", "box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength", "all dimensions are in mm\"\"\" # move starting point from the center to", "position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines = f''' #===================================================== # BEAMS #=====================================================", "BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1", "if self.no_system: # print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines +=", "#/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with", "MacroWriter: \"\"\"Main class for creating a macro file to be run by `gate`", "= f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_start_beams(self,", "self.geometry_dict.values(): geometry_lines += item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w')", "open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor", "True lines = f''' 
#===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton", "def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log file is time stamped\"\"\" log_file_name", "PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']}", "1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for item", "a dictionary containing the list of root file dictionary keys are 'trackers', 'hits',", "- roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor +", "system_thickness self.results_files = {key:[] for key in ['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\")", "macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def", "roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file", "= ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import subprocess from os import path", "self.beam_created: self.create_beam() self.create_start_beams() lines = f\"\"\" #===================================================== # GEOMETRY #===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #=====================================================", "sensor_pitch) * n_sensors # initialize an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness -", "'Air' # set 
material to Skull for the first and the last layer", "phantom_material = 'Air' # set material to Skull for the first and the", "HadronIonisation proton ''' # add lines from phantom and sensors for item in", "= {key:[] for key in ['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers =", "/gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self): \"\"\"creates the main macro file output:", "sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict", "1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1", "y_loc=0, z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm", "box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation", "list of root file dictionary keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics()", "create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName", "n_phantom_layers - 1: phantom_material = 'Skull' # layers start from 10 and extend", "#================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # 
BEAMS #=====================================================", "#/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random", "for creating a sensor In the current implementation sensor is a flat plane", "the current implementation is a flat box perpendicular to beam which is in", "(infrared divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n}", "10 for layer in range(n_phantom_layers): phantom_material = 'Water' # the parameters of the", "and the last layer elif layer == 1 or layer == n_phantom_layers -", "# Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log file is time", "{y_loc}') # create system fisrt but only once geometry_lines = '' if self.no_system:", "'Water', beam_energy = 250, distance_to_system = 1, system_thickness = 1, n_sensors = 1,", "in range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor roc_loc = sensor_loc -", "+= self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then add sensor lines", "123456 # /gate/random/verbose 1 # /gate/source/verbose 0 # to check Steplimiter #/tracking/verbose 1", "system # print(f'system thickness: {self.system_thickness}') y_loc = self.system_thickness / 2 - thickness /", "macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def", "DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose", "to check Steplimiter 
#/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam']", "[1]*21, phantom_material = 'Water', beam_energy = 250, distance_to_system = 1, system_thickness = 1,", "is added to get parameters at the entrance to the real phantom layer", "log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname],", "y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a GATE macro for creating a phantom", "f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self): results_path", "+= f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm", "= {} self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness = system_thickness self.results_files =", "/gate/source/verbose 0 # to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name", "file output: name the macro file, a dictionary containing the list of root", "# JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose", "#=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with", "item # write to file and mark it with current timestamp macro_name =", "lines = f''' /gate/run/initialize # Enable the following lines to display 
available and", "f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for", "nbs/00_macrotools.ipynb (unless otherwise specified). __all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import", "= physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world", "# Physics (infrared divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm", "item in self.physics_dict.values(): physics_lines += item # write to file and mark it", "f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm", "''' for item in self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] =", "on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print() def", "''' return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water',", "phantom_material = 'Water', beam_energy = 250, distance_to_system = 1, system_thickness = 1, n_sensors", "the current implementation sensor is a flat plane perpendicular to the beam the", "/gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for item in self.geometry_dict.values():", "a macro file to be run by `gate` \"\"\" def __init__(self, macro_folder='../mac', results_folder='../results',", "File to edit: nbs/00_macrotools.ipynb (unless otherwise specified). 
__all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek']", "SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as", "y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create system with sensors and", "{self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print() def create_sensor(self,", "sensor is a flat plane perpendicular to the beam the beam is along", "f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000", "stderr=subprocess.STDOUT) # Cell import math def Ek(mass, momentum): '''Helpfull function that converts momentum", "# Cell import subprocess from os import path from time import strftime class", "stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate',", "run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log file is time stamped\"\"\" log_file_name =", "\"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0,", "roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor,", "geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list}", "print(f'Number of phantom layers: {self.n_phantom_layers}') for 
layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0,", "PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION", "PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path)", "mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines = f\"\"\"#*************************************************************", "create_initialization(self): lines = f''' /gate/run/initialize # Enable the following lines to display available", "= 'Water' # the parameters of the particles are recorded at the exit", "= 'Water', beam_energy = 250, distance_to_system = 1, system_thickness = 1, n_sensors =", "self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation', 'beam', 'start_beam']) self.geometry_dict = {} self.physics_dict =", "create system with sensors and readout chips for i_sensor in range(n_sensors): sensor_loc =", "f: f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out =", "def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1", "with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}):", 
"self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self): lines =", "energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines = f''' #===================================================== #", "sensors for item in self.physics_dict.values(): physics_lines += item # write to file and", "/gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air ''' for", "with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db", "proton ''' # add lines from phantom and sensors for item in self.physics_dict.values():", "= geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0,", "= self.system_thickness, y_loc=self.system_y_loc) self.no_system = False # then add sensor lines geometry_lines +=", "main macro file output: name the macro file, a dictionary containing the list", "return macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21,", "# START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start", "# to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac'", "- roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, 
thickness=roc_thickness)", "(unless otherwise specified). __all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import subprocess", "with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path", "/gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}']", "= f''' /gate/run/initialize # Enable the following lines to display available and enabled", "tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material ==", "perpendicular to the beam the beam is along y direction all dimensions are", "box{n} 0.1 mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor to the box{n}", "import path from time import strftime class MacroWriter: \"\"\"Main class for creating a", "this volume PhaseSpaceActor and DoseActor ''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results,", "true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines self.physics_dict[f'layer{n}'] = physics_lines self.actor_dict[f'layer{n}']", "'z':0}): self.beam_created = True lines = f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource PBS", "direction all dimensions are in mm\"\"\" # move starting point from the center", 
"/gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries", "is in y direction all dimensions are in cm two actors are currently", "/gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path}", "fisrt but only once geometry_lines = '' if self.no_system: # print(f'system created with:", "#=============================================== {self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f:", "phantom layers: {self.n_phantom_layers}') for layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200,", "display available and enabled processes # /gate/physics/processList Available # /gate/physics/processList Enabled ''' macro_name", "f: f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world '''", "= system_y_loc self.system_thickness = system_thickness self.results_files = {key:[] for key in ['trackers', 'hits',", "from the center to the top_surface of the system # print(f'system thickness: {self.system_thickness}')", "Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self, n=0,", "mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial", "0 #/gate/output/root/setRootCoincidencesFlag 0 
/gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self): \"\"\"creates", "Physics (infrared divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion", "/gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag 0 \"\"\"", "file dictionary keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization()", "#===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #=====================================================", "phantom_thickness = sum(phantom_layer_thickness) system_thickness = (sensor_thickness + sensor_pitch) * n_sensors # initialize an", "world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner", "# the parameters of the particles are recorded at the exit from the", "x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length}", "is along y direction all dimensions are in mm\"\"\" # move starting point", "phantom box. 
which in the current implementation is a flat box perpendicular to", "self.no_system = False # then add sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name", "with sensors and readout chips for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch +", "self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness = system_thickness self.results_files = {key:[] for", "/gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm", "self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0,", "= f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength", "mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1", "as f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True", "False # then add sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name sensor{n} /gate/{system}/daughters/insert", "mm*mrad #/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle", "= True def print_info(self): print(f'Info for the system created on {self.timestamp}...') print(f'Number of", "open(macro_name, 'w') as f: f.write(lines) return macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers", 
"import subprocess from os import path from time import strftime class MacroWriter: \"\"\"Main", "mm\"\"\" # move starting point from the center to the top_surface of the", "GATE macro for creating a sensor In the current implementation sensor is a", "strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created = False self.no_system = True def print_info(self): print(f'Info", "/gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/box{n}/setMaterial {material}", "1 # /gate/source/verbose 0 # to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries}", "\"\"\"creates the main macro file output: name the macro file, a dictionary containing", "f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def create_geometry(self): geometry_lines", "{self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]}", "1 or layer == n_phantom_layers - 1: phantom_material = 'Skull' # layers start", "mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach", "'' if self.no_system: # print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines", "particles are recorded at the exit from the layer # the Air layer", 
"/gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance", "== n_phantom_layers - 1: phantom_material = 'Skull' # layers start from 10 and", "negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg", "true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10", "mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad", "mm /gate/world/setMaterial Air ''' for item in self.geometry_dict.values(): geometry_lines += item macro_name =", "{y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible 1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return", "a GATE macro for creating a phantom box. 
which in the current implementation", "system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm", "/gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc}", "/gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1,", "0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor", "{self.create_output()} /gate/application/start exit \"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines)", "1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom and system geometries\"\"\" phantom_thickness = sum(phantom_layer_thickness)", "the first and the last layer elif layer == 1 or layer ==", "{n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f:", "file is time stamped\"\"\" log_file_name = f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+')", "def Ek(mass, momentum): '''Helpfull function that converts momentum to kinetic energy''' return math.sqrt(mass**2", "# /gate/physics/processList Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w')", "1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines 
self.physics_dict[f'layer{n}'] =", "f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag", "my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file()", "available and enabled processes # /gate/physics/processList Available # /gate/physics/processList Enabled ''' macro_name =", "to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true", "f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines =", "/gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 # /gate/random/verbose 1 # /gate/source/verbose 0 # to check", "0 /gate/output/root/setRootOpticalFlag 0 \"\"\" return out def create_macro_file(self): \"\"\"creates the main macro file", "# Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy", "BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV", "my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create system with sensors and readout chips for", "scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1 mm 
/gate/physics/SetMaxStepSizeInRegion world", "print_info(self): print(f'Info for the system created on {self.timestamp}...') print(f'Number of phantom layers: {self.n_phantom_layers}')", "f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] = geometry_lines self.physics_dict[f'sensor{n}'] = physics_lines def create_system(self, x_length=200, z_length=200, thickness=1,", "{thickness} mm /gate/scanner/geometry/setZLength {z_length} mm /gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/scanner/setMaterial Air /gate/scanner/vis/setVisible", "/gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion", "def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy = 250,", "z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for creating a", "/gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for the", "for item in self.actor_dict.values(): actor_lines += item macro_name = f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name", "the main macro file output: name the macro file, a dictionary containing the", "self.physics_dict = {} self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness = system_thickness self.results_files", "the negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create", "/gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion", "sensor lines geometry_lines += f\"\"\" #sensor /gate/scanner/daughters/name 
sensor{n} /gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm", "thickness=0.1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for creating a sensor", "+ 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'):", "create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== # START BEAMS #===================================================== # JamesRandom Ranlux64", "f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== # START BEAMS #===================================================== #", "# print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}') geometry_lines += self.create_system(thickness =", "macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results = results_folder", "system_y_loc self.system_thickness = system_thickness self.results_files = {key:[] for key in ['trackers', 'hits', 'dose']}", "color='blue'): ''' Compose a GATE macro for creating a phantom box. 
which in", "# create system fisrt but only once geometry_lines = '' if self.no_system: #", "macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def", "+ '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag", "f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length}", "= f\"\"\" #phatom box {n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength", "with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(geometry_lines) def create_actors(self): actor_lines = f''' /gate/actor/addActor ProtonNuclearInformationActor", "#===================================================== /control/execute {self.macro}/{self.macro_dict[\"geometry\"]} #===================================================== # PHYSICS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================", "in mm\"\"\" # move starting point from the center to the top_surface of", "GenericIon /gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add lines from phantom", "def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose", "print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) 
my_macro.create_sensor(n=i_sensor + 100, y_loc=", "are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if not self.beam_created:", "physics_lines self.actor_dict[f'layer{n}'] = actor_lines def create_physics(self): physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1", "physics_lines += item # write to file and mark it with current timestamp", "mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm", "{self.physics_list} /gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm", "{z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta", "{key:[] for key in ['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0", "# layers start from 10 and extend in the negative y direction y_loc", "= -(sensor_pitch + sensor_thickness) * i_sensor roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2", "the list of root file dictionary keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry()", "/gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm /gate/physics/Electron/SetCutInRegion box{n} 0.1 mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\"", "1 /gate/box{n}/vis/setColor {color} \"\"\" physics_lines = f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for", "10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f:", "to the real phantom layer if layer == 0: phantom_material = 'Air' #", "Cell 
import subprocess from os import path from time import strftime class MacroWriter:", "* i_sensor roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc:", "1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\" self.geometry_dict[f'layer{n}'] = geometry_lines", "{self.macro}/{self.macro_dict[\"physics\"]} #================================================================ # ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} #===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]}", "/gate/physics/Gamma/SetCutInRegion world 1 mm /gate/physics/Electron/SetCutInRegion world 1 mm /gate/physics/Positron/SetCutInRegion world 1 mm /gate/physics/Gamma/SetCutInRegion", "''' Compose a GATE macro for creating a phantom box. 
which in the", "it with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}', 'w')", "sensor_thickness) * i_sensor roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} -", "def print_info(self): print(f'Info for the system created on {self.timestamp}...') print(f'Number of phantom layers:", "self.physics_dict.values(): physics_lines += item # write to file and mark it with current", "INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START", "results_folder='../results', logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results = results_folder self.logs", "create phantom layers y_loc = 10 for layer in range(n_phantom_layers): phantom_material = 'Water'", "/gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true #/gate/actor/dose{n}/enableNumberOfHits true \"\"\"", "2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta 3 mrad /gate/source/PBS/setSigmaPhi 3 mrad /gate/source/PBS/setEllipseXThetaEmittance 15", "material=phantom_material) # create system with sensors and readout chips for i_sensor in range(n_sensors):", "# Cell import math def Ek(mass, momentum): '''Helpfull function that converts momentum to", "x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a GATE macro", "= f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with 
open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def create_geometry(self):", "create_geometry(self): geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000", "{tracker_results_path} /gate/actor/tracker{n}/attachTo box{n} /gate/actor/tracker{n}/enableNuclearFlag true /gate/actor/tracker{n}/enableProductionProcess false #/gate/actor/tracker{n}/useVolumeFrame true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor", "y_loc=0, z_loc=0, system='scanner'): \"\"\"Compose a GATE macro for creating a sensor In the", "#/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add lines from phantom and sensors", "= f\"\"\"#************************************************************* # Physics (infrared divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n}", "/gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as", "location of sensor {n} is: {y_loc}') # create system fisrt but only once", "real phantom layer if layer == 0: phantom_material = 'Air' # set material", "= 'Skull' # layers start from 10 and extend in the negative y", "#===================================================== /control/execute {self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT", "true /gate/actor/tracker{n}/storeOutgoingParticles true /gate/actor/addActor DoseActor dose{n} /gate/actor/dose{n}/save {dose_results_path} 
/gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution", "i_sensor roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}')", "readout chips for i_sensor in range(n_sensors): sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor", "system='scanner'): system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness}", "{self.macro}/{self.macro_dict[\"beam\"]} #===================================================== # START BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #===============================================", "f: f.write(lines) return macro_name, self.results_files, self.timestamp # Cell def create_all(n_phantom_layers = 21, phantom_layer_thickness", "sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines = f''' #===================================================== # BEAMS", "'w') as f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created =", "def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created = True lines = f'''", "key in ['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers = 0 self.beam_created =", "lines = f''' #===================================================== # BEAMS #===================================================== /gate/source/addSource PBS PencilBeam /gate/source/PBS/setParticleType proton /gate/source/PBS/setEnergy", "/gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm 
/gate/source/PBS/setSigmaTheta 3", "{} self.actor_dict = {} self.system_y_loc = system_y_loc self.system_thickness = system_thickness self.results_files = {key:[]", "layer in range(self.n_phantom_layers): print() def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0,", "the macro file, a dictionary containing the list of root file dictionary keys", "# set material to Skull for the first and the last layer elif", "extend in the negative y direction y_loc -= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material)", "with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT) # Cell import math def", "proton /gate/source/PBS/setEnergy {energy} MeV /gate/source/PBS/setSigmaEnergy {sigma_energy} MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX", "the particles are recorded at the exit from the layer # the Air", "attached actor to the box{n} #*************************************************************** /gate/actor/addActor PhaseSpaceActor tracker{n} /gate/actor/tracker{n}/save {tracker_results_path} /gate/actor/tracker{n}/attachTo box{n}", "move starting point from the center to the top_surface of the system #", "= f'actor{self.timestamp}.mac' self.macro_dict['actors'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(actor_lines) def create_initialization(self):", "/gate/world/daughters/name {system} /gate/world/daughters/insert box /gate/scanner/geometry/setXLength {x_length} mm /gate/scanner/geometry/setYLength {thickness} mm /gate/scanner/geometry/setZLength {z_length} mm", "f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\"", "y_loc= sensor_loc, 
thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc, thickness=roc_thickness) my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() #", "item in self.geometry_dict.values(): geometry_lines += item macro_name = f'geometry{self.timestamp}.mac' self.macro_dict['geometry'] = macro_name with", "self.results_files = {key:[] for key in ['trackers', 'hits', 'dose']} self.timestamp = strftime(\"%Y%b%d_%H%M%S\") self.n_phantom_layers", "{n} /gate/world/daughters/name box{n} /gate/world/daughters/insert box /gate/box{n}/geometry/setXLength {x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length}", "mark it with current timestamp macro_name = f'physics{self.timestamp}.mac' self.macro_dict['physics'] = macro_name with open(f'{self.macro}/{macro_name}',", "= macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0,", "deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name = f\"beam{energy}.mac\" self.macro_dict['beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w')", "0.1 mm /gate/physics/SetMaxStepSizeInRegion world 1 mm /gate/physics/ActivateStepLimiter proton /gate/physics/ActivateStepLimiter deuteron /gate/physics/ActivateStepLimiter triton /gate/physics/ActivateStepLimiter", "flat plane perpendicular to the beam the beam is along y direction all", "1, n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5, roc_thickness=0.5): \"\"\"sets parameters for phantom", "mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\"", "/gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material} /gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 
/gate/sensor{n}/vis/setColor", "all dimensions are in cm two actors are currently added to this volume", "# START BEAMS #===================================================== # JamesRandom Ranlux64 MersenneTwister /gate/random/setEngineName MersenneTwister /gate/random/setEngineSeed 123456 #", "logs_folder self.sensor_material = sensor_material self.physics_list = physics_list self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'intialisation',", "'.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0", "1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines = f\"\"\" \"\"\" self.geometry_dict[f'sensor{n}'] =", "__all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell import subprocess from os import", "\"\"\" macro_name = f'{self.macro}/main_macro{self.timestamp}.mac' with open(macro_name, 'w') as f: f.write(lines) return macro_name, self.results_files,", "parameters at the entrance to the real phantom layer if layer == 0:", "MeV /gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm /gate/source/PBS/setSigmaX 2 mm /gate/source/PBS/setSigmaY 2 mm /gate/source/PBS/setSigmaTheta", "{dose_results_path} /gate/actor/dose{n}/attachTo box{n} /gate/actor/dose{n}/stepHitType random /gate/actor/dose{n}/setResolution 1 10 1 /gate/actor/dose{n}/enableDose true #/gate/actor/dose{n}/enableUncertaintyDose true", "0 # to check Steplimiter #/tracking/verbose 1 #/gate/application/noGlobalOutput /gate/application/setTotalNumberOfPrimaries {n_primaries} \"\"\" macro_name =", "the layer # the Air layer is added to get parameters at the", "ACTORS #================================================================ /control/execute {self.macro}/{self.macro_dict[\"actors\"]} 
#===================================================== # INITIALISATION #===================================================== /control/execute {self.macro}/{self.macro_dict[\"intialisation\"]} #===================================================== # BEAMS", "sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc} - roc: {roc_loc}') my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc, thickness=sensor_thickness) my_macro.create_sensor(n=i_sensor", "thickness: {self.system_thickness}') y_loc = self.system_thickness / 2 - thickness / 2 + y_loc", "geometry_lines = f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm", "function that converts momentum to kinetic energy''' return math.sqrt(mass**2 + momentum**2) - mass", "= f''' /gate/geometry/setMaterialDatabase ../data/GateMaterials.db # World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength", "path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material == 'Skull': color", "'w') as f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== # START", "1000 mm /gate/world/setMaterial Air ''' for item in self.geometry_dict.values(): geometry_lines += item macro_name", "= system_thickness self.results_files = {key:[] for key in ['trackers', 'hits', 'dose']} self.timestamp =", "system_thickness=system_thickness) # create phantom layers y_loc = 10 for layer in range(n_phantom_layers): phantom_material", "{self.system_thickness}') y_loc = self.system_thickness / 2 - thickness / 2 + y_loc #", "open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_beam(self, 
energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}): self.beam_created", "1 mm /gate/physics/Gamma/SetCutInRegion scanner 0.1 mm /gate/physics/Electron/SetCutInRegion scanner 0.1 mm /gate/physics/Positron/SetCutInRegion scanner 0.1", "'w') as f: f.write(lines) def create_output(self): results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root')", "parameters of the particles are recorded at the exit from the layer #", "f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag 0 /gate/output/root/setRootOpticalFlag", "for creating a macro file to be run by `gate` \"\"\" def __init__(self,", "create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name {system}", "def create_initialization(self): lines = f''' /gate/run/initialize # Enable the following lines to display", "\"\"\" macro_name = f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines)", "#/gate/source/PBS/setEllipseXThetaRotationNorm negative /gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad /gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90", "the entrance to the real phantom layer if layer == 0: phantom_material =", "out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path} /gate/output/root/setRootHitFlag 1 /gate/output/root/setRootSinglesFlag 0 #/gate/output/root/setRootCoincidencesFlag 0 /gate/output/root/setRootNtupleFlag", "divergence) cuts for the layer{n} #************************************************************* /gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm 
/gate/physics/Electron/SetCutInRegion box{n} 0.1", "are in cm two actors are currently added to this volume PhaseSpaceActor and", "logs_folder='../logs', system_thickness=1, system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'): self.macro = macro_folder self.results = results_folder self.logs =", "/gate/source/PBS/setEllipseYPhiRotationNorm negative /gate/source/PBS/setRotationAxis 1 0 0 /gate/source/PBS/setRotationAngle 90 deg #/gate/application/setTotalNumberOfPrimaries 10000 ''' macro_name", "BEAMS #===================================================== /control/execute {self.macro}/{self.macro_dict[\"start_beam\"]} #=============================================== # OUTPUT SETTINGS #=============================================== {self.create_output()} /gate/application/start exit \"\"\"", "edit: nbs/00_macrotools.ipynb (unless otherwise specified). __all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek'] # Cell", "create_macro_file(self): \"\"\"creates the main macro file output: name the macro file, a dictionary", "+ sensor_pitch) * n_sensors # initialize an instance of MacroWriter my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness", "-= phantom_layer_thickness[layer] my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc, material=phantom_material) # create system with sensors and readout", "current implementation sensor is a flat plane perpendicular to the beam the beam", "/gate/{system}/daughters/insert box /gate/sensor{n}/geometry/setXLength {x_length} mm /gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm /gate/sensor{n}/geometry/setZLength {z_length} mm /gate/sensor{n}/setMaterial {self.sensor_material}", "x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name {system} /gate/world/daughters/insert", "/ 2 - thickness / 2 + y_loc # print(f'y location of sensor", "thickness=phantom_layer_thickness[layer], 
y_loc=y_loc, material=phantom_material) # create system with sensors and readout chips for i_sensor", "f'gate_stdout_err_{strftime(\"%Y%b%d_%H%M%S\")}.log' log_path = path.join(log_folder, log_file_name) with open(log_path,'a+') as f_stdout: subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT)", "y_loc = 10 for layer in range(n_phantom_layers): phantom_material = 'Water' # the parameters", "f: f.write(lines) def create_start_beams(self, n_primaries=10000): lines = f\"\"\" #===================================================== # START BEAMS #=====================================================", "= '' if self.no_system: # print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}')", "is a flat plane perpendicular to the beam the beam is along y", "thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a GATE macro for creating", "1 /gate/scanner/vis/setColor cyan /gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200,", "f.write(actor_lines) def create_initialization(self): lines = f''' /gate/run/initialize # Enable the following lines to", "Enabled ''' macro_name = f'initialise{self.timestamp}.mac' self.macro_dict['intialisation'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f:", "World /gate/world/geometry/setXLength 1000 mm /gate/world/geometry/setYLength 1000 mm /gate/world/geometry/setZLength 1000 mm /gate/world/setMaterial Air '''", "myNuclearInfoActor /gate/actor/myNuclearInfoActor/attachTo world ''' for item in self.actor_dict.values(): actor_lines += item macro_name =", "name the macro file, a dictionary containing the list of root file dictionary", "{x_loc} {y_loc} {z_loc} mm /gate/sensor{n}/vis/setVisible 1 /gate/sensor{n}/vis/setColor magenta /gate/systems/{system}/level1/attach sensor{n} /gate/sensor{n}/attachCrystalSD \"\"\" physics_lines", "containing the list of root file 
dictionary keys are 'trackers', 'hits', 'dose' \"\"\"", "# the Air layer is added to get parameters at the entrance to", "results_path = path.join(self.results, f'TrackerHits{self.timestamp}') self.results_files['hits'].append(results_path + '.root') out = f\"\"\" /gate/output/root/enable /gate/output/root/setFileName {results_path}", "create system fisrt but only once geometry_lines = '' if self.no_system: # print(f'system", "''' tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root') dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt') self.results_files['trackers'].append(tracker_results_path) self.results_files['dose'].append(dose_results_path) if material", "/gate/physics/displayCuts #/gate/physics/addProcess LowEnergyHadronIonisation /gate/physics/addProcess HadronIonisation proton ''' # add lines from phantom and", "= f'start_beam{n_primaries}.mac' self.macro_dict['start_beam'] = macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(lines) def create_output(self):", "mm /gate/physics/Positron/SetCutInRegion box{n} 0.1 mm \"\"\" actor_lines = f\"\"\"#************************************************************* # attached actor to", "n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, material='Water', color='blue'): ''' Compose a GATE", "def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0, system='scanner'): system_lines = f''' /gate/world/daughters/name", "/gate/scanner/vis/setLineStyle dashed ''' return system_lines def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0,", "= [1]*21, phantom_material = 'Water', beam_energy = 250, distance_to_system = 1, system_thickness =", "'''Helpfull function that converts momentum to kinetic energy''' return math.sqrt(mass**2 + momentum**2) -", "= macro_name with open(f'{self.macro}/{macro_name}', 'w') as f: f.write(physics_lines) def 
create_geometry(self): geometry_lines = f'''", "{x_length} mm /gate/box{n}/geometry/setYLength {thickness} mm /gate/box{n}/geometry/setZLength {z_length} mm /gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm", "dictionary keys are 'trackers', 'hits', 'dose' \"\"\" self.create_geometry() self.create_physics() self.create_actors() self.create_geometry() self.create_initialization() if", "entrance to the real phantom layer if layer == 0: phantom_material = 'Air'", "geometry_lines = '' if self.no_system: # print(f'system created with: thickness: {self.system_thickness} at loc:", "material == 'Skull': color = 'yellow' geometry_lines = f\"\"\" #phatom box {n} /gate/world/daughters/name", "math def Ek(mass, momentum): '''Helpfull function that converts momentum to kinetic energy''' return", "+ sensor_thickness) * i_sensor roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2 print(f'sensor {sensor_loc}", "my_macro.create_beam(energy=beam_energy) return my_macro.create_macro_file() # Cell def run_macro(macroname, log_folder='../logs'): \"\"\"runs macro file the log" ]
[ "self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in", "self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.unsupport_fmt_file,", "self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in", "in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath =", "self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3)", "TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx,", "TestCase, expectedFailure # req2toml plugin from req2toml.utils import read_requirments from tests.mixin import CaseMixin,", "# req2toml plugin from req2toml.utils import read_requirments from tests.mixin import CaseMixin, TempfileMixin class", "test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs),", "plugin from req2toml.utils import read_requirments from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin,", "self._create_tempfile(self.right_path_txt, self.messy_content) reqs = 
read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs: for", "= read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs: for r in req.split(\";\"):", "TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content", "self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath", "self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs", "r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath", "in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def", "5) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin,", "# standard library from unittest import TestCase, expectedFailure # req2toml plugin from req2toml.utils", "self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs:", "in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, 
TempfileMixin, TestCase):", "self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for", "TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs =", "for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin,", "self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def", "test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs),", ") reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs: for r", "TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx,", "reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs: for r in", "in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath", "self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3)", "req in reqs: for r in req.split(\";\"): self.assertIn(r, 
self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin,", "for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin,", "TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure", "self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs:", "for r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases()", "TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs),", "self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.unformatted_content)", "reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs: for r in", "@expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx,", "import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(", "self.assertIn(r, 
self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file,", "TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath)", "TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin,", "for r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self):", "self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content", "self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath =", "reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self):", "TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs", "req.split(\";\"): 
self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(", "self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for", "self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs: for", "= self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req", "self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self):", "reqs: for r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self):", "test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for", "read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs: for r in req.split(\";\"): self.assertIn(r,", "class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs =", "for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def 
test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases()", "for r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases()", "in reqs: for r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def", "r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath", "self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for", "library from unittest import TestCase, expectedFailure # req2toml plugin from req2toml.utils import read_requirments", "tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath =", "expectedFailure # req2toml plugin from req2toml.utils import read_requirments from tests.mixin import CaseMixin, TempfileMixin", "read_requirments from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases()", "class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content )", "TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, 
self.formatted_content ) reqs =", "class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content )", "4) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin,", "req in reqs: for r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase):", "standard library from unittest import TestCase, expectedFailure # req2toml plugin from req2toml.utils import", "self.assertEqual(len(reqs), 3) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class", "3) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class", "from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath", "unittest import TestCase, expectedFailure # req2toml plugin from req2toml.utils import read_requirments from tests.mixin", "= read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs: for r in req.split(\";\"):", "req in reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase):", "read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(", ") reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs: for r", "self.assertEqual(len(reqs), 4) for req in reqs: for r in 
req.split(\";\"): self.assertIn(r, self.unformatted_content) class", "req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt,", "self.assertEqual(len(reqs), 5) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content) class", "import TestCase, expectedFailure # req2toml plugin from req2toml.utils import read_requirments from tests.mixin import", "in reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def", "self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content)", "for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin,", "self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content)", "self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs: for", "reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs: for r in", "@expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.unsupport_fmt_file, self.formatted_content", "req2toml.utils import read_requirments from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def", "class 
TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt)", "req2toml plugin from req2toml.utils import read_requirments from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin,", "class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content )", "TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs", "req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath =", "def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin,", "r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases()", "TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs", "req in 
reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase):", "CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt,", "3) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin,", "test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase):", "= self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req", "in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath =", "TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.unsupport_fmt_file, self.formatted_content ) read_requirments(self.ctx,", "from req2toml.utils import read_requirments from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase):", "self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt,", 
"<reponame>benbenbang/Req2Toml<filename>tests/test_read_requirements.py # standard library from unittest import TestCase, expectedFailure # req2toml plugin from", "req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(", "def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath)", "self.assertIn(r, self.formatted_content) @expectedFailure class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt,", "import read_requirments from tests.mixin import CaseMixin, TempfileMixin class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req1_from_right_path_with_txt(self):", "self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req", "= self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs:", "TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs =", "self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs: for r", "self.assertEqual(len(reqs), 3) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure", "self.right_path_txt, 
self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in reqs:", "def test_read_req1_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath)", "reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self):", "= self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases()", "def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5)", "= self._create_tempfile( self.right_path_txt, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req", "self.messy_content) class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content", "read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs: for r in req.split(\";\"): self.assertIn(r,", "test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs),", "reqs: for r in req.split(\";\"): self.assertIn(r, self.formatted_content) @expectedFailure class 
TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase): def", "in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath =", "def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath)", "from unittest import TestCase, expectedFailure # req2toml plugin from req2toml.utils import read_requirments from", "self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.right_path_txt, self.unformatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4)", "class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.unsupport_fmt_file, self.formatted_content )", "self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content)", "for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.unformatted_content) class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin,", "r in req.split(\";\"): self.assertIn(r, self.formatted_content) class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase): def test_read_req2_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath", "TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase): def test_read_req3_from_right_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = 
self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx,", "TempfileMixin, TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.unsupport_fmt_file, self.formatted_content ) read_requirments(self.ctx, self.tmpPath)", "self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 3) for req in", "self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content) reqs = read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in", "= read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs: for r in req.split(\";\"):", "TestCase): def test_read_req_from_right_path_with_rst(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile( self.support_fmt_file, self.formatted_content ) reqs = read_requirments(self.ctx,", "self.tmpPath) self.assertEqual(len(reqs), 5) for req in reqs: for r in req.split(\";\"): self.assertIn(r, self.messy_content)", "TempfileMixin, TestCase): def test_read_req_from_wrong_path_with_txt(self): self.set_read_requirements_cases() self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content) read_requirments(self.ctx, self.wrong_path_txt) @expectedFailure class", "read_requirments(self.ctx, self.tmpPath) self.assertEqual(len(reqs), 4) for req in reqs: for r in req.split(\";\"): self.assertIn(r," ]
[ "is False: break def is_binary_file(file_path, block_size=512): \"\"\" If a file can't be decoded", "relevant files for grep.py.\"\"\" import os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next", "a binary file. \"\"\" assert type(file_path) == str try: with open(file_path, 'rb') as", "return True except IOError as io_error: return False def with_read(file_path): def wrapper(func): with", "can't be decoded by ascii or there are NULL ('\\x00') bytes assume this", "bytes assume this is a binary file. \"\"\" assert type(file_path) == str try:", "return False # Consider an empty file a text file try: block.decode('ascii') return", "def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be searched.\"\"\" assert type(caller_dir) is str", "searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive) is bool for root, dirs, files", "return True # Consider files containing null bytes binary elif not block: return", "file to be searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive) is bool for", "file paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is an actual", "os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is an actual file on disk. if", "Consider files containing null bytes binary elif not block: return False # Consider", "False except UnicodeDecodeError: return True except IOError as io_error: return False def with_read(file_path):", "\"\"\" If a file can't be decoded by ascii or there are NULL", "files containing null bytes binary elif not block: return False # Consider an", "f: block = f.read(block_size) if b'\\x00' in block: return True # Consider files", "\"\"\"Supplies relevant files for grep.py.\"\"\" import os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates", "== str try: with open(file_path, 'rb') as f: block = f.read(block_size) if b'\\x00'", "binary file. 
\"\"\" assert type(file_path) == str try: with open(file_path, 'rb') as f:", "next file to be searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive) is bool", "for root, dirs, files in os.walk(caller_dir): for f in files: # Environment specific", "type(is_recursive) is bool for root, dirs, files in os.walk(caller_dir): for f in files:", "or there are NULL ('\\x00') bytes assume this is a binary file. \"\"\"", "('\\x00') bytes assume this is a binary file. \"\"\" assert type(file_path) == str", "# Consider files containing null bytes binary elif not block: return False #", "this is a binary file. \"\"\" assert type(file_path) == str try: with open(file_path,", "return False except UnicodeDecodeError: return True except IOError as io_error: return False def", "get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be searched.\"\"\" assert type(caller_dir) is str assert", "os.path.isfile(file_path): yield file_path if is_recursive is False: break def is_binary_file(file_path, block_size=512): \"\"\" If", "to be searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive) is bool for root,", "True except IOError as io_error: return False def with_read(file_path): def wrapper(func): with open(file_path,", "\"\"\"Generates next file to be searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive) is", "files in os.walk(caller_dir): for f in files: # Environment specific file paths. file_path", "is str assert type(is_recursive) is bool for root, dirs, files in os.walk(caller_dir): for", "root, dirs, files in os.walk(caller_dir): for f in files: # Environment specific file", "as f: block = f.read(block_size) if b'\\x00' in block: return True # Consider", "bool for root, dirs, files in os.walk(caller_dir): for f in files: # Environment", "for f in files: # Environment specific file paths. 
file_path = os.path.normpath('{0}/{1}'.format(root, f))", "f.read(block_size) if b'\\x00' in block: return True # Consider files containing null bytes", "\"\"\" assert type(file_path) == str try: with open(file_path, 'rb') as f: block =", "os.walk(caller_dir): for f in files: # Environment specific file paths. file_path = os.path.normpath('{0}/{1}'.format(root,", "file try: block.decode('ascii') return False except UnicodeDecodeError: return True except IOError as io_error:", "files for grep.py.\"\"\" import os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file", "file_path if is_recursive is False: break def is_binary_file(file_path, block_size=512): \"\"\" If a file", "file a text file try: block.decode('ascii') return False except UnicodeDecodeError: return True except", "ascii or there are NULL ('\\x00') bytes assume this is a binary file.", "block = f.read(block_size) if b'\\x00' in block: return True # Consider files containing", "block: return False # Consider an empty file a text file try: block.decode('ascii')", "IOError as io_error: return False def with_read(file_path): def wrapper(func): with open(file_path, 'r') as", "except IOError as io_error: return False def with_read(file_path): def wrapper(func): with open(file_path, 'r')", "os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be searched.\"\"\" assert", "if is_recursive is False: break def is_binary_file(file_path, block_size=512): \"\"\" If a file can't", "type(file_path) == str try: with open(file_path, 'rb') as f: block = f.read(block_size) if", "it is an actual file on disk. if os.path.isfile(file_path): yield file_path if is_recursive", "UnicodeDecodeError: return True except IOError as io_error: return False def with_read(file_path): def wrapper(func):", "True # Consider files containing null bytes binary elif not block: return False", "there are NULL ('\\x00') bytes assume this is a binary file. 
\"\"\" assert", "assert type(caller_dir) is str assert type(is_recursive) is bool for root, dirs, files in", "with open(file_path, 'rb') as f: block = f.read(block_size) if b'\\x00' in block: return", "as io_error: return False def with_read(file_path): def wrapper(func): with open(file_path, 'r') as f:", "block: return True # Consider files containing null bytes binary elif not block:", "file on disk. if os.path.isfile(file_path): yield file_path if is_recursive is False: break def", "'rb') as f: block = f.read(block_size) if b'\\x00' in block: return True #", "paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is an actual file", "null bytes binary elif not block: return False # Consider an empty file", "is_recursive is False: break def is_binary_file(file_path, block_size=512): \"\"\" If a file can't be", "break def is_binary_file(file_path, block_size=512): \"\"\" If a file can't be decoded by ascii", "in os.walk(caller_dir): for f in files: # Environment specific file paths. file_path =", "file can't be decoded by ascii or there are NULL ('\\x00') bytes assume", "False # Consider an empty file a text file try: block.decode('ascii') return False", "= f.read(block_size) if b'\\x00' in block: return True # Consider files containing null", "= os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is an actual file on disk.", "# Check if it is an actual file on disk. if os.path.isfile(file_path): yield", "dirs, files in os.walk(caller_dir): for f in files: # Environment specific file paths.", "elif not block: return False # Consider an empty file a text file", "is a binary file. 
\"\"\" assert type(file_path) == str try: with open(file_path, 'rb')", "empty file a text file try: block.decode('ascii') return False except UnicodeDecodeError: return True", "Consider an empty file a text file try: block.decode('ascii') return False except UnicodeDecodeError:", "in block: return True # Consider files containing null bytes binary elif not", "str assert type(is_recursive) is bool for root, dirs, files in os.walk(caller_dir): for f", "import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be searched.\"\"\" assert type(caller_dir)", "# Consider an empty file a text file try: block.decode('ascii') return False except", "not block: return False # Consider an empty file a text file try:", "specific file paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is an", "if os.path.isfile(file_path): yield file_path if is_recursive is False: break def is_binary_file(file_path, block_size=512): \"\"\"", "block.decode('ascii') return False except UnicodeDecodeError: return True except IOError as io_error: return False", "is_recursive): \"\"\"Generates next file to be searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive)", "a text file try: block.decode('ascii') return False except UnicodeDecodeError: return True except IOError", "if it is an actual file on disk. 
if os.path.isfile(file_path): yield file_path if", "containing null bytes binary elif not block: return False # Consider an empty", "for grep.py.\"\"\" import os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to", "except UnicodeDecodeError: return True except IOError as io_error: return False def with_read(file_path): def", "text file try: block.decode('ascii') return False except UnicodeDecodeError: return True except IOError as", "file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is an actual file on", "a file can't be decoded by ascii or there are NULL ('\\x00') bytes", "bytes binary elif not block: return False # Consider an empty file a", "grep.py.\"\"\" import os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be", "assume this is a binary file. \"\"\" assert type(file_path) == str try: with", "yield file_path if is_recursive is False: break def is_binary_file(file_path, block_size=512): \"\"\" If a", "# Environment specific file paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if it", "are NULL ('\\x00') bytes assume this is a binary file. \"\"\" assert type(file_path)", "file. \"\"\" assert type(file_path) == str try: with open(file_path, 'rb') as f: block", "import os import sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be searched.\"\"\"", "try: block.decode('ascii') return False except UnicodeDecodeError: return True except IOError as io_error: return", "disk. if os.path.isfile(file_path): yield file_path if is_recursive is False: break def is_binary_file(file_path, block_size=512):", "str try: with open(file_path, 'rb') as f: block = f.read(block_size) if b'\\x00' in", "sys def get_next_file(caller_dir, is_recursive): \"\"\"Generates next file to be searched.\"\"\" assert type(caller_dir) is", "If a file can't be decoded by ascii or there are NULL ('\\x00')", "in files: # Environment specific file paths. 
file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check", "f in files: # Environment specific file paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) #", "an empty file a text file try: block.decode('ascii') return False except UnicodeDecodeError: return", "NULL ('\\x00') bytes assume this is a binary file. \"\"\" assert type(file_path) ==", "is_binary_file(file_path, block_size=512): \"\"\" If a file can't be decoded by ascii or there", "def is_binary_file(file_path, block_size=512): \"\"\" If a file can't be decoded by ascii or", "by ascii or there are NULL ('\\x00') bytes assume this is a binary", "Environment specific file paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if it is", "b'\\x00' in block: return True # Consider files containing null bytes binary elif", "return False def with_read(file_path): def wrapper(func): with open(file_path, 'r') as f: return func(f)", "False def with_read(file_path): def wrapper(func): with open(file_path, 'r') as f: return func(f) return", "assert type(is_recursive) is bool for root, dirs, files in os.walk(caller_dir): for f in", "an actual file on disk. if os.path.isfile(file_path): yield file_path if is_recursive is False:", "decoded by ascii or there are NULL ('\\x00') bytes assume this is a", "if b'\\x00' in block: return True # Consider files containing null bytes binary", "assert type(file_path) == str try: with open(file_path, 'rb') as f: block = f.read(block_size)", "<reponame>florianbegusch/simple_grep \"\"\"Supplies relevant files for grep.py.\"\"\" import os import sys def get_next_file(caller_dir, is_recursive):", "be decoded by ascii or there are NULL ('\\x00') bytes assume this is", "binary elif not block: return False # Consider an empty file a text", "on disk. 
if os.path.isfile(file_path): yield file_path if is_recursive is False: break def is_binary_file(file_path,", "try: with open(file_path, 'rb') as f: block = f.read(block_size) if b'\\x00' in block:", "Check if it is an actual file on disk. if os.path.isfile(file_path): yield file_path", "f)) # Check if it is an actual file on disk. if os.path.isfile(file_path):", "block_size=512): \"\"\" If a file can't be decoded by ascii or there are", "be searched.\"\"\" assert type(caller_dir) is str assert type(is_recursive) is bool for root, dirs,", "type(caller_dir) is str assert type(is_recursive) is bool for root, dirs, files in os.walk(caller_dir):", "actual file on disk. if os.path.isfile(file_path): yield file_path if is_recursive is False: break", "def with_read(file_path): def wrapper(func): with open(file_path, 'r') as f: return func(f) return wrapper", "False: break def is_binary_file(file_path, block_size=512): \"\"\" If a file can't be decoded by", "files: # Environment specific file paths. file_path = os.path.normpath('{0}/{1}'.format(root, f)) # Check if", "is bool for root, dirs, files in os.walk(caller_dir): for f in files: #", "open(file_path, 'rb') as f: block = f.read(block_size) if b'\\x00' in block: return True", "is an actual file on disk. if os.path.isfile(file_path): yield file_path if is_recursive is", "io_error: return False def with_read(file_path): def wrapper(func): with open(file_path, 'r') as f: return" ]
[ "def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search for a patent entry. Contains", "TEXT, year INTEGER, assignee TEXT, city TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\")", "patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee", "Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual", "= ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind]", "patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that only first assignee location is retrieved", "company assigned to patent - skipping.\") continue try: city = get_location(html) loc =", "corresponding to a place using Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location})", "a place using Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location']", "search for a patent entry. 
Contains extra information (location, text) not available through", "db = sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE", "USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude", "def get_location(patent_html): \"\"\"Gets location of company associated with patent entry (dict).\"\"\" # Grab", "\"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) # Check if", "100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) # Check if request went through", "number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and", "Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO", "city = get_location(html) loc = get_latlon(city) print(city, loc) except (IndexError, KeyError): print(\"Can't grab", "# Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that", "https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link", "(granted) patent applications in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\",", "# USPTO: https://developer.uspto.gov/ibd-api-docs/ # 
Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\"", "extra information (location, text) not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content,", "requests.get(USPTO_API, params=search_params) # Check if request went through successfully (status code 200) if", "ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title of patent entry (dict).", "search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response", "def get_latlon(location): \"\"\"Gets latitude and longitude corresponding to a place using Google Maps", "\"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude corresponding to", "to patent - skipping.\") continue try: city = get_location(html) loc = get_latlon(city) print(city,", "INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit()", "requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search", "page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location):", "and longitude corresponding to a place using Google Maps API.\"\"\" result = requests.get(MAPS_API,", "Check if request went through successfully 
(status code 200) if response.status_code == 200:", "get_location(html) loc = get_latlon(city) print(city, loc) except (IndexError, KeyError): print(\"Can't grab location information", "associated with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': #", "get_latlon(city) print(city, loc) except (IndexError, KeyError): print(\"Can't grab location information - skipping.\") continue", "there's no company listed. if \"assignee\" not in pat: print(\"No company assigned to", "\"\"\"Gets abstract of company associated with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if", "IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY, title TEXT, year", "return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of company associated with patent entry", "\"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) # Check if request went", "MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search page by patent number:", "print(pat['patentNumber'], pat['title']) # Skip patent if there's no company listed. if \"assignee\" not", "== 200: # Get list of results patents = response.json()['response']['docs'] # Populate a", "import requests import bs4 import sqlite3 # Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/", "a patent entry. 
Contains extra information (location, text) not available through API.\"\"\" patent_html", "# Search for successful (granted) patent applications in nanotechnology search_params = {\"searchText\": \"nano\",", "pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit() db.close() else: print(\"Unexpected response code:\",", "Contains extra information (location, text) not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return", "# Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API =", "TEXT, city TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\") for pat in patents:", "result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO", "USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of company associated with patent", "Split tag contents so that only first assignee location is retrieved ass_text =", "skipping.\") continue try: city = get_location(html) loc = get_latlon(city) print(city, loc) except (IndexError,", "= get_location(html) loc = get_latlon(city) print(city, loc) except (IndexError, KeyError): print(\"Can't grab location", "assignee location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind =", "KEY, title TEXT, year INTEGER, assignee TEXT, city TEXT, abstract TEXT, lat REAL,", "INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT, city TEXT, abstract TEXT,", "\"\"\"Gets title of patent entry (dict). 
Avoids case mangling (MRI -> Mri) associated", "(?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit() db.close() else: print(\"Unexpected", "sqlite3 # Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API", "REAL)\"\"\") for pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title'])", "data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY,", "is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return", "print(\"Can't grab location information - skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents", "USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" #", "of company associated with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ ==", "print(\"No company assigned to patent - skipping.\") continue try: city = get_location(html) loc", "scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if there's no company", "\"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude corresponding to a place using Google", "'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) # Check if request went through successfully", "patents (id INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT, city TEXT,", "first assignee location is retrieved ass_text = 
ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind", "tag contents so that only first assignee location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n',", "individual USPTO search page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB =", "ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that only first assignee location", "TEXT, lat REAL, lng REAL)\"\"\") for pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title']", "return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title of patent entry (dict). Avoids", "(MRI -> Mri) associated with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html):", "# Overwrite old data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id", "Link to individual USPTO search page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\"", "company associated with patent entry (dict).\"\"\" # Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next()", "import bs4 import sqlite3 # Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google", "abstract of company associated with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__", "case mangling (MRI -> Mri) associated with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split())", "for successful (granted) patent applications in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\",", "API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: 
https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API", "= ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title of patent entry", "Mri) associated with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract", "\"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search page by patent", "nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'}", "(id INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT, city TEXT, abstract", "lat REAL, lng REAL)\"\"\") for pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] =", "API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company associated with", "entry. 
Contains extra information (location, text) not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num)", "new SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE IF", "Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns", "__name__ == '__main__': # Search for successful (granted) patent applications in nanotechnology search_params", "of results patents = response.json()['response']['docs'] # Populate a new SQLite database db =", "through successfully (status code 200) if response.status_code == 200: # Get list of", "ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind +", "REAL, lng REAL)\"\"\") for pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html)", "patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude", "skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']),", "= patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that only first assignee location is", "def get_abstract(patent_html): \"\"\"Gets abstract of company associated with patent entry (dict).\"\"\" return '", "ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title of patent entry (dict). 
Avoids case", "# Populate a new SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite old data", "200: # Get list of results patents = response.json()['response']['docs'] # Populate a new", "Get list of results patents = response.json()['response']['docs'] # Populate a new SQLite database", "\"assignee\" not in pat: print(\"No company assigned to patent - skipping.\") continue try:", "import sqlite3 # Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro", "sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents", "(status code 200) if response.status_code == 200: # Get list of results patents", "using Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num):", "\"grant\", \"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) # Check if request", "Populate a new SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP", "get_abstract(patent_html): \"\"\"Gets abstract of company associated with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split())", "for a patent entry. 
Contains extra information (location, text) not available through API.\"\"\"", "text) not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html):", "ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title of", "pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if there's no company listed.", "location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search for a", "\"lxml\") def get_location(patent_html): \"\"\"Gets location of company associated with patent entry (dict).\"\"\" #", "API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of", "'desc'} response = requests.get(USPTO_API, params=search_params) # Check if request went through successfully (status", "patent applications in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\":", "results patents = response.json()['response']['docs'] # Populate a new SQLite database db = sqlite3.connect(SQLITE_DB)", "in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder':", "result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search for a patent entry.", "available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location", "\"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search page by 
patent number: USPTO_PAGE =", "assigned to patent - skipping.\") continue try: city = get_location(html) loc = get_latlon(city)", "by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets", "(dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search for successful (granted)", "to a place using Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return", "list of results patents = response.json()['response']['docs'] # Populate a new SQLite database db", "API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of company", "get_title(patent_html): \"\"\"Gets title of patent entry (dict). Avoids case mangling (MRI -> Mri)", "grab location information - skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES", "= requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent", "(location, text) not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def", "def get_title(patent_html): \"\"\"Gets title of patent entry (dict). Avoids case mangling (MRI ->", "= get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if there's no company listed. 
if", "abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city,", "if response.status_code == 200: # Get list of results patents = response.json()['response']['docs'] #", "of company associated with patent entry (dict).\"\"\" # Grab metadata table ass_loc =", "request went through successfully (status code 200) if response.status_code == 200: # Get", "params=search_params) # Check if request went through successfully (status code 200) if response.status_code", "get_location(patent_html): \"\"\"Gets location of company associated with patent entry (dict).\"\"\" # Grab metadata", "(IndexError, KeyError): print(\"Can't grab location information - skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT", "try: city = get_location(html) loc = get_latlon(city) print(city, loc) except (IndexError, KeyError): print(\"Can't", "return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company associated with patent entry", "patents = response.json()['response']['docs'] # Populate a new SQLite database db = sqlite3.connect(SQLITE_DB) #", "TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY, title TEXT,", "OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude corresponding to a", "so that only first assignee location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind", "that only first assignee location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind =", "= \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search page by", "company listed. 
if \"assignee\" not in pat: print(\"No company assigned to patent -", "Avoids case mangling (MRI -> Mri) associated with the API results.\"\"\" return '", "# Get list of results patents = response.json()['response']['docs'] # Populate a new SQLite", "- skipping.\") continue try: city = get_location(html) loc = get_latlon(city) print(city, loc) except", "== '__main__': # Search for successful (granted) patent applications in nanotechnology search_params =", "TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\") for pat in patents: html =", "successful (granted) patent applications in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\":", "= {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response =", "city TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\") for pat in patents: html", "longitude corresponding to a place using Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\":", "listed. if \"assignee\" not in pat: print(\"No company assigned to patent - skipping.\")", "lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets", "'__main__': # Search for successful (granted) patent applications in nanotechnology search_params = {\"searchText\":", "Skip patent if there's no company listed. 
if \"assignee\" not in pat: print(\"No", "= requests.get(USPTO_API, params=search_params) # Check if request went through successfully (status code 200)", "= response.json()['response']['docs'] # Populate a new SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite", "db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY, title", "database db = sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE IF EXISTS patents\")", "'.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search for successful (granted) patent applications in", "response.status_code == 200: # Get list of results patents = response.json()['response']['docs'] # Populate", "response.json()['response']['docs'] # Populate a new SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite old", "assignee TEXT, city TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\") for pat in", "\"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) #", "if there's no company listed. 
if \"assignee\" not in pat: print(\"No company assigned", "= get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr,", "retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind", "\"\"\"Gets latitude and longitude corresponding to a place using Google Maps API.\"\"\" result", "# Link to individual USPTO search page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\", "VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit() db.close() else:", "= get_latlon(city) print(city, loc) except (IndexError, KeyError): print(\"Can't grab location information - skipping.\")", "= \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search page by patent number: USPTO_PAGE", "successfully (status code 200) if response.status_code == 200: # Get list of results", "title of patent entry (dict). Avoids case mangling (MRI -> Mri) associated with", "went through successfully (status code 200) if response.status_code == 200: # Get list", "latitude and longitude corresponding to a place using Google Maps API.\"\"\" result =", "int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit() db.close() else: print(\"Unexpected response code:\", response.status_code)", "USPTO patent search for a patent entry. 
Contains extra information (location, text) not", "- skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'],", "(int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit() db.close() else: print(\"Unexpected response", "{\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API,", "location information - skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\",", "associated with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of", "print(city, loc) except (IndexError, KeyError): print(\"Can't grab location information - skipping.\") continue abstr", "old data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY", "(dict). Avoids case mangling (MRI -> Mri) associated with the API results.\"\"\" return", "entry (dict).\"\"\" # Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents", "mangling (MRI -> Mri) associated with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def", "# Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to", "response = requests.get(USPTO_API, params=search_params) # Check if request went through successfully (status code", "table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that only first assignee", "+ 1:rind] def get_title(patent_html): \"\"\"Gets title of patent entry (dict). 
Avoids case mangling", "metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that only first", "# Split tag contents so that only first assignee location is retrieved ass_text", "except (IndexError, KeyError): print(\"Can't grab location information - skipping.\") continue abstr = get_abstract(html)", "patent - skipping.\") continue try: city = get_location(html) loc = get_latlon(city) print(city, loc)", "\"\"\"Gets location of company associated with patent entry (dict).\"\"\" # Grab metadata table", "title TEXT, year INTEGER, assignee TEXT, city TEXT, abstract TEXT, lat REAL, lng", "patent entry (dict).\"\"\" # Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag", "db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng']))", "USPTO search page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\"", "= \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude corresponding", "SQLITE_DB = \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude corresponding to a place", "if \"assignee\" not in pat: print(\"No company assigned to patent - skipping.\") continue", "Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API =", "return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search for successful (granted) patent", "loc = get_latlon(city) 
print(city, loc) except (IndexError, KeyError): print(\"Can't grab location information -", "bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of company associated with patent entry (dict).\"\"\"", "= \"db.sqlite\" def get_latlon(location): \"\"\"Gets latitude and longitude corresponding to a place using", "through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of", "lng REAL)\"\"\") for pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'],", "\"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params)", "patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search for", "html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if there's", "-> Mri) associated with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets", "rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title of patent", "in pat: print(\"No company assigned to patent - skipping.\") continue try: city =", "pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip", "with the API results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company", "SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE IF EXISTS", "# Check if request went through successfully (status code 200) if response.status_code ==", 
"information (location, text) not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\")", "location of company associated with patent entry (dict).\"\"\" # Grab metadata table ass_loc", "BS4/HTML of USPTO patent search for a patent entry. Contains extra information (location,", "continue try: city = get_location(html) loc = get_latlon(city) print(city, loc) except (IndexError, KeyError):", "1:rind] def get_title(patent_html): \"\"\"Gets title of patent entry (dict). Avoids case mangling (MRI", "loc) except (IndexError, KeyError): print(\"Can't grab location information - skipping.\") continue abstr =", "entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search for successful", "information - skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']),", "company associated with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__':", "return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search for a patent", "place using Google Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def", "KeyError): print(\"Can't grab location information - skipping.\") continue abstr = get_abstract(html) db.execute(\"INSERT INTO", "= ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html): \"\"\"Gets title", "results.\"\"\" return ' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company associated with patent", "'.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company associated with patent 
entry (dict).\"\"\" return", "'') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind] def get_title(patent_html):", "pat['title']) # Skip patent if there's no company listed. if \"assignee\" not in", "' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search for successful (granted) patent applications", "search page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB = \"db.sqlite\" def", "Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps: https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\"", "pat: print(\"No company assigned to patent - skipping.\") continue try: city = get_location(html)", "patent entry. Contains extra information (location, text) not available through API.\"\"\" patent_html =", "bs4 import sqlite3 # Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ # Google Maps:", "applications in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\": \"UTILITY\", \"documentType\": \"grant\", \"rows\": 100,", "to individual USPTO search page by patent number: USPTO_PAGE = \"http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\\ OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN.\" SQLITE_DB", "with patent entry (dict).\"\"\" # Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split", "of USPTO patent search for a patent entry. Contains extra information (location, text)", "PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT, city TEXT, abstract TEXT, lat", "patent search for a patent entry. 
Contains extra information (location, text) not available", "USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search page", "associated with patent entry (dict).\"\"\" # Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() #", "' '.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company associated with patent entry (dict).\"\"\"", "of patent entry (dict). Avoids case mangling (MRI -> Mri) associated with the", "Maps API.\"\"\" result = requests.get(MAPS_API, params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML", "year INTEGER, assignee TEXT, city TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\") for", "patent if there's no company listed. if \"assignee\" not in pat: print(\"No company", "\"UTILITY\", \"documentType\": \"grant\", \"rows\": 100, 'sortOrder': 'desc'} response = requests.get(USPTO_API, params=search_params) # Check", "in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent", "= USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of company associated with", "not available through API.\"\"\" patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets", "patent_html = USPTO_PAGE.format(patent_num) return bs4.BeautifulSoup(requests.get(patent_html).content, \"lxml\") def get_location(patent_html): \"\"\"Gets location of company associated", "INTEGER, assignee TEXT, city TEXT, abstract TEXT, lat REAL, lng REAL)\"\"\") for pat", "the API results.\"\"\" return ' 
'.join(patent_html.find_all('font')[-1].text.split()) def get_abstract(patent_html): \"\"\"Gets abstract of company associated", "scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search for a patent entry. Contains extra", "EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY, title TEXT, year INTEGER,", "requests import bs4 import sqlite3 # Relevant API Documentation: # USPTO: https://developer.uspto.gov/ibd-api-docs/ #", "not in pat: print(\"No company assigned to patent - skipping.\") continue try: city", "db.execute(\"\"\"CREATE TABLE patents (id INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT,", "patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'], loc['lng'])) db.commit() db.close()", "Search for successful (granted) patent applications in nanotechnology search_params = {\"searchText\": \"nano\", \"applicationType\":", "# Skip patent if there's no company listed. if \"assignee\" not in pat:", "\"\"\"Returns BS4/HTML of USPTO patent search for a patent entry. 
Contains extra information", "only first assignee location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\")", "= scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if there's no", "params={\"address\": location}) return result.json()['results'][0]['geometry']['location'] def scrape_patent_web(patent_num): \"\"\"Returns BS4/HTML of USPTO patent search for", "patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if", "Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so that only", "https://developers.google.com/maps/documentation/geocoding/intro USPTO_API = \"https://developer.uspto.gov/ibd-api/v1/patent/application\" MAPS_API = \"https://maps.googleapis.com/maps/api/geocode/json\" # Link to individual USPTO search", "patent entry (dict). 
Avoids case mangling (MRI -> Mri) associated with the API", "location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\")", "= sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE", "get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0], city, abstr, loc['lat'],", "TABLE patents (id INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT, city", "if __name__ == '__main__': # Search for successful (granted) patent applications in nanotechnology", "contents so that only first assignee location is retrieved ass_text = ass_loc.text.split('\\n\\n')[0].replace('\\n', '')", "Overwrite old data db.execute(\"DROP TABLE IF EXISTS patents\") db.execute(\"\"\"CREATE TABLE patents (id INTEGER", "if request went through successfully (status code 200) if response.status_code == 200: #", "continue abstr = get_abstract(html) db.execute(\"INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)\", (int(pat['patentNumber']), pat['title'], int(pat['year']), pat['assignee'][0],", "code 200) if response.status_code == 200: # Get list of results patents =", "a new SQLite database db = sqlite3.connect(SQLITE_DB) # Overwrite old data db.execute(\"DROP TABLE", "with patent entry (dict).\"\"\" return ' '.join(patent_html.p.contents[0].split()) if __name__ == '__main__': # Search", "for pat in patents: html = scrape_patent_web(pat['patentNumber']) pat['title'] = get_title(html) print(pat['patentNumber'], pat['title']) #", "abstract TEXT, lat REAL, lng REAL)\"\"\") for pat in patents: html = scrape_patent_web(pat['patentNumber'])", "(dict).\"\"\" # Grab metadata table ass_loc = patent_html.find(text=\"Assignee:\").find_next() # Split tag contents so", "get_latlon(location): \"\"\"Gets latitude and longitude corresponding to a place using Google Maps 
API.\"\"\"", "get_title(html) print(pat['patentNumber'], pat['title']) # Skip patent if there's no company listed. if \"assignee\"", "200) if response.status_code == 200: # Get list of results patents = response.json()['response']['docs']", "entry (dict). Avoids case mangling (MRI -> Mri) associated with the API results.\"\"\"", "ass_loc.text.split('\\n\\n')[0].replace('\\n', '') lind = ass_text.find(\"(\") rind = ass_text.rfind(\")\") return ass_text[lind + 1:rind] def", "no company listed. if \"assignee\" not in pat: print(\"No company assigned to patent" ]
[ "timefrime to use ' 'when pulling data in seconds. Defaults to 14400. Available", "pull in the format YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true',", "Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data. Required GET parameters are", "'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR',", "self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else: return self.data if __name__ ==", "/usr/bin/env python import argparse import datetime import json import time import logging import", "to use ' 'when pulling data in seconds. Defaults to 14400. Available options'", "dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data", "= json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid", "return self.data if __name__ == '__main__': DESCRIPTION = \"\"\" A simple tool to", "'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN',", "date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self):", "directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for", "'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 
'USDT_NXT', 'USDT_REP', 'USDT_STR',", "\"start\", and \"end\". \"Start\" and \"end\" are given in UNIX timestamp format and", "1800, 7200, 14400, and 86400), \"start\", and \"end\". \"Start\" and \"end\" are given", "parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc']", "A simple tool to pull price data from Poloneix's API. The data can", "'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH',", "ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date for the data pull in", "datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the data pull in the", "lens = [max(map(len, col)) for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x", "the format YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A", "save=True): if self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return", "memory as a pandas DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair',", "for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens) table =", "## returnChartData Returns candlestick chart data. 
Required GET parameters are \"currencyPair\", \"period\" (candlestick", "if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)]", "'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC',", "DESTINATION = _dest START_DATE = args.start_date END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR,", "from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK',", "returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400,", "data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self,", "and \"end\". \"Start\" and \"end\" are given in UNIX timestamp format and used", "pair. ' \\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error:", "flag used to view currency pairs.') args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO')", "as a csv or used in memory as a pandas DataFrame. 
Poloneix Documentation:", "= [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e)", "is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else: return", "'\\t'.join('{{:{}}}'.format(x) for x in lens) table = [fmt.format(*row) for row in setup] print('\\n'.join(table))", "'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT',", "'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK',", "[AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for", "' 'when pulling data in seconds. Defaults to 14400. Available options' \\ '", "table = [fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE =", "int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return response def", "in seconds. Defaults to 14400. Available options' \\ ' 300, 900, 1800, 7200,", "parameters are \"currencyPair\", \"period\" (candlestick period in seconds; valid values are 300, 900,", "'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO',", "format and used to specify the date range for the data returned. 
Sample", "'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC',", "'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC',", "7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to which", "are 300, 900, 1800, 7200, 14400, and 86400), \"start\", and \"end\". \"Start\" and", "Exception(f'{self.currency_pair} is not a valid currency pair. ' \\ f'You must use one", "for the data pull in the format YYYY-MM-DD. Defaults ' \\ 'to now.')", "'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC',", "zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens) table = [fmt.format(*row) for row", "use ' 'when pulling data in seconds. Defaults to 14400. Available options' \\", "None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else: return self.data", "file should be saved. Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime(", "and used to specify the date range for the data returned. Sample output:", "'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data.", "in UNIX timestamp format and used to specify the date range for the", "logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x", "default=None, help='The end date for the data pull in the format YYYY-MM-DD. 
Defaults", "'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick", "self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period =", "dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the data", "if SAVE and _dest is None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else:", "DESCRIPTION = \"\"\" A simple tool to pull price data from Poloneix's API.", "14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to which '", "currency pair. ' \\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API", "= [max(map(len, col)) for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in", "'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\"", "'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC',", "\"end\" are given in UNIX timestamp format and used to specify the date", "row] for row in chunks] lens = [max(map(len, col)) for col in zip(*setup)]", "3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e in", "else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data)", "setup = [[str(e) for e in row] for row in chunks] lens =", "is not a valid currency pair. 
' \\ f'You must use one of:", "= None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def", "self.api = api self.destination = destination self.data = None self.logger = logger self.url", "= 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api = api self.destination", "to pull price data from Poloneix's API. The data can be saved down", "pd import requests from pathlib import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS =", "YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used", "= _dest START_DATE = args.start_date END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION,", "return response def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and 'error'", "cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols]", "'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM',", "be saved down as a csv or used in memory as a pandas", "argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair. 
Use --pairs to view", "one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data):", "_dest = args.dest if SAVE and _dest is None: home_dir = str(Path.home()) DESTINATION", "parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if", "path to which ' 'the output file should be saved. Defaults to the", "f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None:", "the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01',", "86400), \"start\", and \"end\". 
\"Start\" and \"end\" are given in UNIX timestamp format", "= api self.destination = destination self.data = None self.logger = logger self.url =", "in lens) table = [fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair", "= ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def", "pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use ' 'when pulling data in", "= args.start_date END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION, period=PERIOD, start_date=START_DATE, end_date=END_DATE,", "else: return self.data if __name__ == '__main__': DESCRIPTION = \"\"\" A simple tool", "' 300, 900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The", "return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return response def parse_api_data_text(self, response):", "'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not a valid currency pair. ' \\", "dest='end_date', type=str, default=None, help='The end date for the data pull in the format", "'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC',", "range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e in row] for row in", "'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC',", "be saved. 
Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() +", "'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI',", "= self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else: return self.data if __name__", "'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS',", "Defaults to 14400. Available options' \\ ' 300, 900, 1800, 7200, 14400, 86400.',", "'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT',", "period in seconds; valid values are 300, 900, 1800, 7200, 14400, and 86400),", "pull in the format YYYY-MM-DD. Defaults ' \\ 'to 30 days ago.') parser.add_argument('--end-date',", "dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data is", "'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not", "= requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict)", "'volume', 'weightedAverage'] self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7,", "Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency", "Poloneix Documentation: 
https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix", "data in seconds. Defaults to 14400. Available options' \\ ' 300, 900, 1800,", "api self.destination = destination self.data = None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair'", "row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True PERIOD = args.period", "import json import time import logging import pandas as pd import requests from", "{parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime',", "of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data", "price data from Poloneix's API. The data can be saved down as a", "as a pandas DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair',", "\"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper()", "import pandas as pd import requests from pathlib import Path from retrying import", "class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data. 
Required", "date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response =", "= destination self.data = None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end='", "and \"end\" are given in UNIX timestamp format and used to specify the", "data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols =", "options' \\ ' 300, 900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str,", "get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple()))", "self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string", "UNIX timestamp format and used to specify the date range for the data", "'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC',", "self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None,", "['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def save_data(self,", "CURRENCY_PAIR = args.currency_pair SAVE = True 
PERIOD = args.period _dest = args.dest if", "are \"currencyPair\", \"period\" (candlestick period in seconds; valid values are 300, 900, 1800,", "'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH',", "wait_random_max=2000) def run(self, save=True): if self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if", "should be saved. Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow()", "csv or used in memory as a pandas DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\"", "'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM',", "data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low', 'close',", "from Poloneix's API. The data can be saved down as a csv or", "_dest is None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest", "default=14400, help='The timefrime to use ' 'when pulling data in seconds. Defaults to", "return self else: return self.data if __name__ == '__main__': DESCRIPTION = \"\"\" A", "\\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None: return", "currency pairs.') args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks =", "True PERIOD = args.period _dest = args.dest if SAVE and _dest is None:", "data returned. 
Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None,", "args.period _dest = args.dest if SAVE and _dest is None: home_dir = str(Path.home())", "response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if parsed_data['error']", "Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None,", "tool to pull price data from Poloneix's API. The data can be saved", "= logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in", "seconds. Defaults to 14400. Available options' \\ ' 300, 900, 1800, 7200, 14400,", "900, 1800, 7200, 14400, and 86400), \"start\", and \"end\". \"Start\" and \"end\" are", "help='The start date for the data pull in the format YYYY-MM-DD. 
Defaults '", "currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date)", "'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix", "'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used to view currency pairs.')", "'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB',", "@retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data is None: response = self.get_api_data()", "YYYY-MM-DD. Defaults ' \\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The", "isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.': raise", "7200, 14400, and 86400), \"start\", and \"end\". \"Start\" and \"end\" are given in", "get_api_data(self): response = requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if", "__init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp =", "date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data =", "pandas DataFrame. 
Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str,", "== 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not a valid currency pair. '", "dest='dest', type=str, default=None, help='The full path to which ' 'the output file should", "build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime']", "+ 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e", "import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM',", "datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the data pull in the format YYYY-MM-DD.", "for the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] 
\"\"\" def __init__(self, currency_pair='USDT_BTC',", "pathlib import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN',", "'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO',", "'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT',", "in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens) table = [fmt.format(*row) for", "def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp", "raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime']", "86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to which ' 'the", "'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR',", "self.data = None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}'", "Available options' \\ ' 300, 900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest',", "= 300 self.api = api self.destination = destination self.data = None self.logger =", "start date for the data pull in the format YYYY-MM-DD. 
Defaults ' \\", "'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC',", "parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the", "'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK',", "self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data is None: response =", "pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open',", "Poloneix's API. The data can be saved down as a csv or used", "for row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True PERIOD =", "valid currency pair. 
' \\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise", "'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC',", "GET parameters are \"currencyPair\", \"period\" (candlestick period in seconds; valid values are 300,", "are given in UNIX timestamp format and used to specify the date range", "'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC',", "f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return", "to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use ' 'when pulling", "= args.currency_pair SAVE = True PERIOD = args.period _dest = args.dest if SAVE", "= \"\"\" A simple tool to pull price data from Poloneix's API. The", "print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True PERIOD = args.period _dest = args.dest", "self else: return self.data if __name__ == '__main__': DESCRIPTION = \"\"\" A simple", "= argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair. Use --pairs to", "col)) for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens) table", "is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url)", "range for the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] 
\"\"\" def __init__(self,", "'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK',", "'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD',", "seconds; valid values are 300, 900, 1800, 7200, 14400, and 86400), \"start\", and", "def get_api_data(self): response = requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data = json.loads(response.text)", "= self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period", "'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return", "= [fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True", "Path from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA',", "the data pull in the format YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs',", "index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data is None:", "data. 
Required GET parameters are \"currencyPair\", \"period\" (candlestick period in seconds; valid values", "Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] =", "= f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is", "'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC',", "response = requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data,", "' 'the output file should be saved. Defaults to the home directory.') parser.add_argument('--start-date',", "14400, and 86400), \"start\", and \"end\". \"Start\" and \"end\" are given in UNIX", "response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else: return self.data if", "and _dest is None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION =", "is None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE", "argparse import datetime import json import time import logging import pandas as pd", "\"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair. Use", "'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP',", "dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair. 
Use --pairs to view pairs') parser.add_argument('--period',", "for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e in row]", "and 'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is", "'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG',", "AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST',", "default=False, help='A flag used to view currency pairs.') args = parser.parse_args() logger =", "the date range for the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\"", "currency pair. Use --pairs to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to", "'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID',", "poloneix currency pair. 
Use --pairs to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime", "'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX',", "parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols", "self.save_data(self.data) return self else: return self.data if __name__ == '__main__': DESCRIPTION = \"\"\"", "f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple()))", "'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns", "logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0,", "requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and", "= logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'):", "a csv or used in memory as a pandas DataFrame. 
Poloneix Documentation: https://poloniex.com/support/api/", "parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use ' 'when pulling data in seconds.", "'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2',", "300, 900, 1800, 7200, 14400, and 86400), \"start\", and \"end\". \"Start\" and \"end\"", "action='store_true', default=False, help='A flag used to view currency pairs.') args = parser.parse_args() logger", "destination self.data = None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\", "parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not a valid", "help='The timefrime to use ' 'when pulling data in seconds. Defaults to 14400.", "fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens) table = [fmt.format(*row) for row in", "specify the date range for the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...]", "full path to which ' 'the output file should be saved. Defaults to", "logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999", "can be saved down as a csv or used in memory as a", "14400. Available options' \\ ' 300, 900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest',", "self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date)", "simple tool to pull price data from Poloneix's API. 
The data can be", "end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not", "must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def", "default='USDT_LTC', type=str, help='A poloneix currency pair. Use --pairs to view pairs') parser.add_argument('--period', dest='period',", "not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api", "= data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low',", "in the format YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False,", "run(self, save=True): if self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data)", "'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV',", "start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if", "Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'),", "if parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not a valid currency", "in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not a", "pull price data from 
Poloneix's API. The data can be saved down as", "if __name__ == '__main__': DESCRIPTION = \"\"\" A simple tool to pull price", "'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData", "Required GET parameters are \"currencyPair\", \"period\" (candlestick period in seconds; valid values are", "'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC',", "str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE = args.start_date END_DATE =", "output file should be saved. Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str,", "retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS',", "requests from pathlib import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR',", "https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair.", "help='A flag used to view currency pairs.') args = parser.parse_args() logger = logging.getLogger(__name__)", "'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation:", "[[str(e) for e in row] for row in chunks] lens = [max(map(len, col))", "= True PERIOD = args.period _dest = args.dest if SAVE and _dest is", "'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC',", "from pathlib import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH',", "logging 
import pandas as pd import requests from pathlib import Path from retrying", "to 14400. Available options' \\ ' 300, 900, 1800, 7200, 14400, 86400.', type=int)", "save: self.save_data(self.data) return self else: return self.data if __name__ == '__main__': DESCRIPTION =", "[max(map(len, col)) for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)", "'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP',", "'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS',", "'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO',", "for e in row] for row in chunks] lens = [max(map(len, col)) for", "API. The data can be saved down as a csv or used in", "self.period = 300 self.api = api self.destination = destination self.data = None self.logger", "row in chunks] lens = [max(map(len, col)) for col in zip(*setup)] fmt =", "or used in memory as a pandas DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser", "retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD',", "'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG',", "'weightedAverage'] self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000,", "in memory as a pandas DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION)", "which ' 'the output file should be saved. 
Defaults to the home directory.')", "DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE = args.start_date END_DATE = args.end_date", "\\ ' 300, 900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None,", "\\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data =", "type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the data pull", "datetime import json import time import logging import pandas as pd import requests", "Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used to", "'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT',", "output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData',", "default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the data pull in", "' \\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date", "'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC',", "chart data. Required GET parameters are \"currencyPair\", \"period\" (candlestick period in seconds; valid", "raise Exception(f'{self.currency_pair} is not a valid currency pair. 
' \\ f'You must use", "e in row] for row in chunks] lens = [max(map(len, col)) for col", "x in lens) table = [fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR =", "PERIOD = args.period _dest = args.dest if SAVE and _dest is None: home_dir", "save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if", "parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used to view currency pairs.') args =", "home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date", "pairs.') args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x", "used to view currency pairs.') args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if", "in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e in row] for row", "'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH',", "return parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True)", "(candlestick period in seconds; valid values are 300, 900, 1800, 7200, 14400, and", "= data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data", "python import argparse import datetime import json import time import logging import pandas", "data from Poloneix's API. 
The data can be saved down as a csv", "dest='pairs', action='store_true', default=False, help='A flag used to view currency pairs.') args = parser.parse_args()", "chunks] lens = [max(map(len, col)) for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for", "= pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc',", "DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A", "default=None, help='The full path to which ' 'the output file should be saved.", "help='The full path to which ' 'the output file should be saved. Defaults", "'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX',", "dict) and 'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair}", "'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC',", "args.start_date END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION, period=PERIOD, start_date=START_DATE, end_date=END_DATE, logger=logger", "x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e in row] for", "[{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] 
\"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None):", "use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self,", "'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS',", "'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC',", "len(AVAILABLE_CURRENCY_PAIRS), 3)] setup = [[str(e) for e in row] for row in chunks]", "type=str, default=None, help='The end date for the data pull in the format YYYY-MM-DD.", "\"Start\" and \"end\" are given in UNIX timestamp format and used to specify", "1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to", "api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp =", "candlestick chart data. Required GET parameters are \"currencyPair\", \"period\" (candlestick period in seconds;", "'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC',", "type=str, default=None, help='The full path to which ' 'the output file should be", "__name__ == '__main__': DESCRIPTION = \"\"\" A simple tool to pull price data", "...] 
\"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair =", "'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC',", "self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api = api self.destination = destination self.data", "data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume',", "data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage']", "else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api = api self.destination = destination", "'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC',", "inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume',", "time import logging import pandas as pd import requests from pathlib import Path", "== '__main__': DESCRIPTION = \"\"\" A simple tool to pull price data from", "parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair. Use --pairs", "300 self.api = api self.destination = destination self.data = None self.logger = logger", "type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to which ' 'the output", "format YYYY-MM-DD. 
Defaults ' \\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None,", "period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date:", "given in UNIX timestamp format and used to specify the date range for", "logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS),", "to view currency pairs.') args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs:", "= currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp", "'when pulling data in seconds. Defaults to 14400. Available options' \\ ' 300,", "import logging import pandas as pd import requests from pathlib import Path from", "900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path", "pair. Use --pairs to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use", "' \\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}')", "the format YYYY-MM-DD. Defaults ' \\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str,", "the data pull in the format YYYY-MM-DD. 
Defaults ' \\ 'to 30 days", "'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart", "= data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def", "'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False)", "args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup", "in chunks] lens = [max(map(len, col)) for col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x)", "'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA',", "'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS',", "valid values are 300, 900, 1800, 7200, 14400, and 86400), \"start\", and \"end\".", "as pd import requests from pathlib import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS", "\"currencyPair\", \"period\" (candlestick period in seconds; valid values are 300, 900, 1800, 7200,", "def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True):", "'the output file should be saved. Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date',", "\\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else:", "help='The end date for the data pull in the format YYYY-MM-DD. 
Defaults '", "data can be saved down as a csv or used in memory as", "'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP',", "down as a csv or used in memory as a pandas DataFrame. Poloneix", "import requests from pathlib import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP',", "Use --pairs to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use '", "in row] for row in chunks] lens = [max(map(len, col)) for col in", "data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] = data['datetime'] cols = ['datetime_utc', 'open', 'high',", "CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data. Required GET", "self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999 else:", "values are 300, 900, 1800, 7200, 14400, and 86400), \"start\", and \"end\". 
\"Start\"", "SAVE and _dest is None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION", "if isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency pair.':", "the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The start", "'__main__': DESCRIPTION = \"\"\" A simple tool to pull price data from Poloneix's", "\"period\" (candlestick period in seconds; valid values are 300, 900, 1800, 7200, 14400,", "f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE = args.start_date END_DATE = args.end_date client =", "'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/", "to which ' 'the output file should be saved. Defaults to the home", "self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else: return self.data if __name__ == '__main__':", "'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK',", "format YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag", "def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp) data.sort_values('datetime', inplace=True) data['datetime_utc'] =", "self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self else:", "dest='period', default=14400, help='The timefrime to use ' 'when pulling data in seconds. 
Defaults", "for row in chunks] lens = [max(map(len, col)) for col in zip(*setup)] fmt", "'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC',", "help='A poloneix currency pair. Use --pairs to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The", "return self @retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data is None: response", "end_date: self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api =", "= args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION, period=PERIOD, start_date=START_DATE, end_date=END_DATE, logger=logger ) client.run(save=SAVE)", "'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ',", "'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM',", "'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH',", "data pull in the format YYYY-MM-DD. 
Defaults ' \\ 'to 30 days ago.')", "logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if", "wait_random_min=1000, wait_random_max=2000) def run(self, save=True): if self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response))", "args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x +", "'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT',", "saved. Defaults to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30),", "#! /usr/bin/env python import argparse import datetime import json import time import logging", "import argparse import datetime import json import time import logging import pandas as", "None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return", "'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR',", "self.get_timestamp(date_string=end_date) self.period = 300 self.api = api self.destination = destination self.data = None", "Defaults ' \\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end", "f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return parsed_data", "'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 
'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH',", "'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object):", "int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return response def parse_api_data_text(self, response): parsed_data", "to the home directory.') parser.add_argument('--start-date', dest='start_date', type=str, default=datetime.datetime.strftime( datetime.datetime.utcnow() + datetime.timedelta(-30), format='%Y-%m-%d'), help='The", "view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use ' 'when pulling data", "None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE =", "def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in parsed_data.keys():", "in the format YYYY-MM-DD. Defaults ' \\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date',", "data['datetime'] cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data =", "pulling data in seconds. Defaults to 14400. Available options' \\ ' 300, 900,", "= parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3]", "used in memory as a pandas DataFrame. 
Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser =", "_dest START_DATE = args.start_date END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION, period=PERIOD,", "'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR', 'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC', 'ETH_ETC',", "'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',]", "format='%Y-%m-%d'), help='The start date for the data pull in the format YYYY-MM-DD. Defaults", "days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date for the data pull", "date for the data pull in the format YYYY-MM-DD. Defaults ' \\ 'to", "timestamp format and used to specify the date range for the data returned.", "'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT',", "now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used to view currency pairs.') args", "['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC',", "'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR',", "date_string=None, date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def", "return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return response", "args.dest if SAVE and _dest 
is None: home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv'", "= str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE = args.start_date END_DATE", "parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to which ' 'the output file", "Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data. Required GET parameters are \"currencyPair\",", "'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date for the", "if self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save: self.save_data(self.data) return self", "'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self", "'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE',", "parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date for the data pull in the", "view currency pairs.') args = parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks", "self.destination = destination self.data = None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\", "= [[str(e) for e in row] for row in chunks] lens = [max(map(len,", "' \\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used to view", "'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP',", "in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True PERIOD = args.period _dest", "'BTC_GNO', 'BTC_GNT', 
'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT',", "args.currency_pair SAVE = True PERIOD = args.period _dest = args.dest if SAVE and", "30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date for the data", "home_dir = str(Path.home()) DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE = args.start_date", "3)] setup = [[str(e) for e in row] for row in chunks] lens", "https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data. Required GET parameters are \"currencyPair\", \"period\"", "= '\\t'.join('{{:{}}}'.format(x) for x in lens) table = [fmt.format(*row) for row in setup]", "parsed_data['error'] == 'Invalid currency pair.': raise Exception(f'{self.currency_pair} is not a valid currency pair.", "for the data pull in the format YYYY-MM-DD. Defaults ' \\ 'to 30", "\"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ## returnChartData Returns candlestick chart data. Required GET parameters", "if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response", "'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS',", "'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP',", "returnChartData Returns candlestick chart data. 
Required GET parameters are \"currencyPair\", \"period\" (candlestick period", "for x in lens) table = [fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR", "currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp =", "[fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True PERIOD", "a pandas DataFrame. Poloneix Documentation: https://poloniex.com/support/api/ \"\"\" parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC',", "import Path from retrying import retry AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY',", "None self.logger = logger self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \\ f'={self.currency_pair}&start={self.start_timestamp}&end=' \\ f'{self.end_timestamp}&period={self.period}' def get_timestamp(self,", "= self.get_timestamp(date_string=end_date) self.period = 300 self.api = api self.destination = destination self.data =", "'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM',", "destination=None, api='returnChartData', logger=None): self.currency_pair = currency_pair.upper() self.start_timestamp = self.get_timestamp(date_string=start_date) if not end_date: self.end_timestamp", "+ datetime.timedelta(-30), format='%Y-%m-%d'), help='The start date for the data pull in the format", "else: DESTINATION = _dest START_DATE = args.start_date END_DATE = args.end_date client = CryptoData(", "used to specify the date range for the data returned. 
Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644,", "self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api = api", "\\ 'to 30 days ago.') parser.add_argument('--end-date', dest='end_date', type=str, default=None, help='The end date for", "if not end_date: self.end_timestamp = 9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300", "col in zip(*setup)] fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens) table = [fmt.format(*row)", "json import time import logging import pandas as pd import requests from pathlib", "= f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv' else: DESTINATION = _dest START_DATE = args.start_date END_DATE = args.end_date client", "'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC',", "'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class", "'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS', 'BTC_VIA', 'BTC_VRC',", "chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)] setup =", "saved down as a csv or used in memory as a pandas DataFrame.", "= args.dest if SAVE and _dest is None: home_dir = str(Path.home()) DESTINATION =", "to specify the date range for the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015},", "parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC', type=str, help='A poloneix currency pair. 
Use --pairs to view pairs')", "pair.': raise Exception(f'{self.currency_pair} is not a valid currency pair. ' \\ f'You must", "'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME',", "\\ 'to now.') parser.add_argument('--pairs', dest='pairs', action='store_true', default=False, help='A flag used to view currency", "import time import logging import pandas as pd import requests from pathlib import", "'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH', 'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR', 'USDT_XMR', 'USDT_XRP',", "\"end\". \"Start\" and \"end\" are given in UNIX timestamp format and used to", "else: return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple())) def get_api_data(self): response = requests.get(self.url) return response def parse_api_data_text(self,", "\"\"\" A simple tool to pull price data from Poloneix's API. The data", "--pairs to view pairs') parser.add_argument('--period', dest='period', default=14400, help='The timefrime to use ' 'when", "pandas as pd import requests from pathlib import Path from retrying import retry", "'USDT_STR', 'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH', 'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP',", "\"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None, period=14400, destination=None, api='returnChartData', logger=None): self.currency_pair", "not a valid currency pair. 
' \\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}')", "self.data if __name__ == '__main__': DESCRIPTION = \"\"\" A simple tool to pull", "def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'): if date_string is None: return int(time.mktime(datetime.datetime.utcnow().timetuple())) else: return int(time.mktime(datetime.datetime.strptime(date_string,", "parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if parsed_data['error'] ==", "in seconds; valid values are 300, 900, 1800, 7200, 14400, and 86400), \"start\",", "currency pair.': raise Exception(f'{self.currency_pair} is not a valid currency pair. ' \\ f'You", "'BTC_BTS', 'BTC_BURST', 'BTC_CLAM', 'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2', 'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT',", "= args.period _dest = args.dest if SAVE and _dest is None: home_dir =", "END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION, period=PERIOD, start_date=START_DATE, end_date=END_DATE, logger=logger )", "date range for the data returned. Sample output: [{\"date\":1405699200,\"high\":0.0045388,\"low\":0.00403001,\"open\":0.00404545,\"close\":0.00427592,\"volume\":44.11655644, \"quoteVolume\":10259.29079097,\"weightedAverage\":0.00430015}, ...] \"\"\" def", "START_DATE = args.start_date END_DATE = args.end_date client = CryptoData( currency_pair=CURRENCY_PAIR, destination=DESTINATION, period=PERIOD, start_date=START_DATE,", "The data can be saved down as a csv or used in memory", "end date for the data pull in the format YYYY-MM-DD. 
Defaults ' \\", "'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',] class CryptoData(object): \"\"\" Poloneix Documentation: https://poloniex.com/support/api/ ##", "'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC',", "parser.parse_args() logger = logging.getLogger(__name__) logger.setLevel('INFO') if args.pairs: chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for", "'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination,", "'ETH_BCH', 'ETH_CVC', 'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH',", "'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME', 'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK', 'BTC_LTC', 'BTC_MAID',", "a valid currency pair. ' \\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else:", "lens) table = [fmt.format(*row) for row in setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE", "'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT', 'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS', 'BTC_SC',", "def run(self, save=True): if self.data is None: response = self.get_api_data() self.build_dataframe(self.parse_api_data_text(response)) if save:", "'open', 'high', 'low', 'close', 'quoteVolume', 'volume', 'weightedAverage'] self.data = data[cols] def save_data(self, dataframe):", "9999999999 else: self.end_timestamp = self.get_timestamp(date_string=end_date) self.period = 300 self.api = api self.destination =", "\\ f'You must use one of: \\n{AVAILABLE_CURRENCY_PAIRS}') else: raise Exception(f'API Error: {parsed_data[\"error\"]}') return", "self.data = data[cols] def save_data(self, dataframe): dataframe.to_csv(self.destination, index=False) return self @retry(stop_max_attempt_number=7, 
wait_random_min=1000, wait_random_max=2000)", "setup] print('\\n'.join(table)) CURRENCY_PAIR = args.currency_pair SAVE = True PERIOD = args.period _dest =", "'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP', 'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH',", "Returns candlestick chart data. Required GET parameters are \"currencyPair\", \"period\" (candlestick period in", "Error: {parsed_data[\"error\"]}') return parsed_data def build_dataframe(self, parsed_data): data = pd.DataFrame(parsed_data) data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp)", "response def parse_api_data_text(self, response): parsed_data = json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in", "data pull in the format YYYY-MM-DD. Defaults ' \\ 'to now.') parser.add_argument('--pairs', dest='pairs',", "SAVE = True PERIOD = args.period _dest = args.dest if SAVE and _dest", "= ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA', 'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM',", "type=str, help='A poloneix currency pair. Use --pairs to view pairs') parser.add_argument('--period', dest='period', default=14400,", "import datetime import json import time import logging import pandas as pd import", "and 86400), \"start\", and \"end\". \"Start\" and \"end\" are given in UNIX timestamp", "json.loads(response.text) if isinstance(parsed_data, dict) and 'error' in parsed_data.keys(): if parsed_data['error'] == 'Invalid currency", "if save: self.save_data(self.data) return self else: return self.data if __name__ == '__main__': DESCRIPTION", "300, 900, 1800, 7200, 14400, 86400.', type=int) parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full" ]
[ "gt_names def get_image_and_label_list(train_path): result = [] annotation_post = \".json\" path, _ = os.path.split(train_path)", "path, _ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for", "pcl_points: x = point[0] y = point[1] z = point[2] if abs(x) <", "open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject'] box_names = [] box_locs =", "annotation_post = \".json\" path, _ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir =", "= pcl.load_XYZI(pcd_path) points = [] for point in pcl_points: x = point[0] y", "and \\ (box[1] >= -2.5) and (box[1] <= 2.5): continue if (box[0] >=", "for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path)", "getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path = os.path.join(annotation_dir,", "as file: for line in file: if line.strip(): yield line.strip() return def read_pcd_points(pcd_path):", "os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir,", "point[2] if abs(x) < 1e-1 and abs(y) < 1e-1 and abs(z) < 1e-1:", "pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename,", "points = [] for point in pcl_points: x = point[0] y = point[1]", "= json.load(my_file) object_list = result['objects']['rect3DObject'] box_names = [] box_locs = [] for box_value", "count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result = json.load(my_file)", "[] annotation_post = 
\".json\" path, _ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir", "os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename = filename", "\".json\" path, _ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\")", "= point[0] y = point[1] z = point[2] if abs(x) < 1e-1 and", "import os import sys sys.path.insert(0, os.getcwd() + \"/.\") import math import numpy as", "show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in", "# import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as", "from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for line", "def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path):", "json.load(my_file) object_list = result['objects']['rect3DObject'] box_names = [] box_locs = [] for box_value in", "2.5): continue if (box[0] >= -41) and (box[0] <= 41) and \\ (box[1]", "def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = [] for point in pcl_points: x", "as np import json # import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath):", "and (box[1] <= 2.5): continue if (box[0] >= -41) and (box[0] <= 41)", "pcd_path)) return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list))", "import sys sys.path.insert(0, os.getcwd() + \"/.\") import math import numpy as np import", "%s not exist\" % 
(annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path)", "box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] # inverse clockwise box = [box_value['centerX'], box_value['centerY'],", "yaw] if (box[0] >= -1.5) and (box[0] <= 1.5) and \\ (box[1] >=", "(annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\",", "print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or %s not", "yield line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = [] for point", "post = os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path", "os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path):", "points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file =", "points def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject']", "result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path,", "annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points,", "gt_names = np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result = [] annotation_post =", 
"box_names = [] box_locs = [] for box_value in object_list: if box_value['class'].strip() !=", "os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename, post =", "encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject'] box_names = [] box_locs = []", "line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = [] for point in", "point[1] z = point[2] if abs(x) < 1e-1 and abs(y) < 1e-1 and", ">= -2.5) and (box[1] <= 2.5): continue if (box[0] >= -41) and (box[0]", "if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] # inverse clockwise box = [box_value['centerX'],", "(box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes,", "= os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename =", "with open(dataFilePath, 'r') as file: for line in file: if line.strip(): yield line.strip()", "np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8')", "object_list = result['objects']['rect3DObject'] box_names = [] box_locs = [] for box_value in object_list:", "= \".json\" path, _ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path,", "-box_value['yaw'] # inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw]", "(box[1] >= -2.5) and (box[1] <= 2.5): continue if (box[0] >= -41) and", "-41) and (box[0] <= 41) and \\ (box[1] >= -81) and (box[1] <=", "get_image_and_label_list(train_path): result = [] 
annotation_post = \".json\" path, _ = os.path.split(train_path) pcd_dir =", "= get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path)", "cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list:", "(box[0] <= 1.5) and \\ (box[1] >= -2.5) and (box[1] <= 2.5): continue", "of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes,", "print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes, gt_names) if __name__", "def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject'] box_names", "if (box[0] >= -41) and (box[0] <= 41) and \\ (box[1] >= -81)", "in file: if line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points", "and abs(z) < 1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points) return", "os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path):", "= [] for point in pcl_points: x = point[0] y = point[1] z", "if (box[0] >= -1.5) and (box[0] <= 1.5) and \\ (box[1] >= -2.5)", "box_value['height'], yaw] if (box[0] >= -1.5) and (box[0] <= 1.5) and \\ (box[1]", "% (annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of", "point[2], point[3]]) numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path): 
points = np.fromfile( str(bin_path),", "numpy_points def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def", "4]) return points def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list", "os import sys sys.path.insert(0, os.getcwd() + \"/.\") import math import numpy as np", "41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names def", "= np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result = []", "json # import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r')", "1.5) and \\ (box[1] >= -2.5) and (box[1] <= 2.5): continue if (box[0]", "= read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes, gt_names) if __name__ == '__main__':", "annotation_filename = filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post)", "line in file: if line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path)", "file: if line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points =", "-2.5) and (box[1] <= 2.5): continue if (box[0] >= -41) and (box[0] <=", "object_list: if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] # inverse clockwise box =", "z = point[2] if abs(x) < 1e-1 and abs(y) < 1e-1 and abs(z)", "in object_list: if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] # inverse clockwise box", "os.getcwd() + \"/.\") import math import numpy as np import json # 
import", "-81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names)", "sys sys.path.insert(0, os.getcwd() + \"/.\") import math import numpy as np import json", "numpy as np import json # import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def", "# inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if", "def get_image_and_label_list(train_path): result = [] annotation_post = \".json\" path, _ = os.path.split(train_path) pcd_dir", ">= -41) and (box[0] <= 41) and \\ (box[1] >= -81) and (box[1]", "infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names", "import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for line in file:", "(box[0] >= -41) and (box[0] <= 41) and \\ (box[1] >= -81) and", "[box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5) and (box[0]", "dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result =", "\"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post)", "pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path))", "abs(z) < 1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points) return numpy_points", "else: print(\"%s or %s not exist\" % (annotation_path, pcd_path)) return result def 
show_annotations(info_path):", "point[1], point[2], point[3]]) numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path): points = np.fromfile(", "result.append((annotation_path, pcd_path)) else: print(\"%s or %s not exist\" % (annotation_path, pcd_path)) return result", "box_locs = [] for box_value in object_list: if box_value['class'].strip() != 'DontCare': yaw =", "import numpy as np import json # import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox", "for line in file: if line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points =", "box_value in object_list: if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] # inverse clockwise", ">= -1.5) and (box[0] <= 1.5) and \\ (box[1] >= -2.5) and (box[1]", "or %s not exist\" % (annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list =", "= os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename, post", "result = json.load(my_file) object_list = result['objects']['rect3DObject'] box_names = [] box_locs = [] for", "< 1e-1 and abs(z) < 1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points =", "in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes, gt_names)", "continue points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path): points", "import json # import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath,", "box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5) and (box[0] <= 1.5) and", "return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = [] for point in 
pcl_points:", "read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file", "for box_value in object_list: if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] # inverse", "gt_boxes, gt_names def get_image_and_label_list(train_path): result = [] annotation_post = \".json\" path, _ =", "for filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post", "in pcl_points: x = point[0] y = point[1] z = point[2] if abs(x)", "annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path)", "points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path): points =", "_ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post", "yaw = -box_value['yaw'] # inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'],", "import math import numpy as np import json # import pcl from second.pytorch.show_3dbox", "def getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for line in file: if line.strip():", "print(\"%s or %s not exist\" % (annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list", "result = [] annotation_post = \".json\" path, _ = os.path.split(train_path) pcd_dir = os.path.join(path,", "len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names =", "= point[1] z = point[2] if abs(x) < 1e-1 and abs(y) < 1e-1", "\\ os.path.exists(pcd_path): 
result.append((annotation_path, pcd_path)) else: print(\"%s or %s not exist\" % (annotation_path, pcd_path))", "box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result", "read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes, gt_names) if __name__ == '__main__': show_annotations(\"/home/lpj/github/data/my_point_cloud/ali_dataset/ImageSets/Pedestrian_train.txt\")", "\"/.\") import math import numpy as np import json # import pcl from", "os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or %s not exist\" %", "if line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = []", "< 1e-1 and abs(y) < 1e-1 and abs(z) < 1e-1: continue points.append([point[0], point[1],", "!= 'DontCare': yaw = -box_value['yaw'] # inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'],", "np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result = [] annotation_post = \".json\" path,", "41) and \\ (box[1] >= -81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes", "sys.path.insert(0, os.getcwd() + \"/.\") import math import numpy as np import json #", "1e-1 and abs(z) < 1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points)", "\\ (box[1] >= -2.5) and (box[1] <= 2.5): continue if (box[0] >= -41)", "second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for line in", "and (box[0] <= 1.5) and \\ (box[1] >= -2.5) and (box[1] <= 2.5):", "getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for line in file: if line.strip(): yield", "y = point[1] 
z = point[2] if abs(x) < 1e-1 and abs(y) <", "= filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) #", "and (box[0] <= 41) and \\ (box[1] >= -81) and (box[1] <= 41):", "def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path", "1e-1 and abs(y) < 1e-1 and abs(z) < 1e-1: continue points.append([point[0], point[1], point[2],", "<= 1.5) and \\ (box[1] >= -2.5) and (box[1] <= 2.5): continue if", "points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes, gt_names) if __name__ ==", "(box[0] <= 41) and \\ (box[1] >= -81) and (box[1] <= 41): box_names.append(box_value['class'].strip())", "read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = [] for point in pcl_points: x =", "(box[0] >= -1.5) and (box[0] <= 1.5) and \\ (box[1] >= -2.5) and", "read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject'] box_names =", "exist\" % (annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number", "= [] annotation_post = \".json\" path, _ = os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\")", "line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points = pcl.load_XYZI(pcd_path) points = [] for", "annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename", "in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path =", "gt_boxes = 
np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result =", "pcd_path)) else: print(\"%s or %s not exist\" % (annotation_path, pcd_path)) return result def", "= [] for box_value in object_list: if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw']", "filename, post = os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename)", "number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path)", "return gt_boxes, gt_names def get_image_and_label_list(train_path): result = [] annotation_post = \".json\" path, _", "= np.array(points) return numpy_points def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4])", "(box[1] <= 2.5): continue if (box[0] >= -41) and (box[0] <= 41) and", "filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path)", "-1.5) and (box[0] <= 1.5) and \\ (box[1] >= -2.5) and (box[1] <=", "clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >=", "1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path):", "point[3]]) numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32,", "<= 2.5): continue if (box[0] >= -41) and (box[0] <= 41) and \\", "for point in pcl_points: x = point[0] y = point[1] z = point[2]", "< 1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points = np.array(points) return numpy_points 
def", "= [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5) and", "continue if (box[0] >= -41) and (box[0] <= 41) and \\ (box[1] >=", "and \\ (box[1] >= -81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes =", "os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s", "os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or %s not exist\" % (annotation_path, pcd_path)) return", "result['objects']['rect3DObject'] box_names = [] box_locs = [] for box_value in object_list: if box_value['class'].strip()", "math import numpy as np import json # import pcl from second.pytorch.show_3dbox import", "[] for point in pcl_points: x = point[0] y = point[1] z =", "= open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject'] box_names = [] box_locs", "[] for box_value in object_list: if box_value['class'].strip() != 'DontCare': yaw = -box_value['yaw'] #", "abs(x) < 1e-1 and abs(y) < 1e-1 and abs(z) < 1e-1: continue points.append([point[0],", "filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path", "<= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names", "pcd_path in cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes,", "<reponame>lpj0822/pointpillars_train<filename>second/pytorch/annotations_process.py import os import sys sys.path.insert(0, os.getcwd() + \"/.\") import 
math import numpy", "box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path):", "annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and", "np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result = [] annotation_post", "\"../Annotations\") for filename_and_post in getFileData(train_path): filename, post = os.path.splitext(filename_and_post) annotation_filename = filename +", "get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points", "pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for", "if abs(x) < 1e-1 and abs(y) < 1e-1 and abs(z) < 1e-1: continue", "file: for line in file: if line.strip(): yield line.strip() return def read_pcd_points(pcd_path): pcl_points", "= np.array(box_names) return gt_boxes, gt_names def get_image_and_label_list(train_path): result = [] annotation_post = \".json\"", "if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or %s not exist\"", "+ annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if", "pcl.load_XYZI(pcd_path) points = [] for point in pcl_points: x = point[0] y =", "mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as file: for line in file: if", "my_file = open(annotation_path, encoding='utf-8') result = json.load(my_file) object_list = result['objects']['rect3DObject'] 
box_names = []", "[] box_locs = [] for box_value in object_list: if box_value['class'].strip() != 'DontCare': yaw", "pcl_points = pcl.load_XYZI(pcd_path) points = [] for point in pcl_points: x = point[0]", "inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0]", "cloud_and_label_list: print(pcd_path) points = read_bin_points(pcd_path) gt_boxes, gt_names = read_annotations_data(annotation_path) mayavi_show_3dbox(points, gt_boxes, gt_names) if", "<= 41) and \\ (box[1] >= -81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box)", "and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names = np.array(box_names) return", "not exist\" % (annotation_path, pcd_path)) return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain", "'r') as file: for line in file: if line.strip(): yield line.strip() return def", "point[0] y = point[1] z = point[2] if abs(x) < 1e-1 and abs(y)", "= os.path.join(annotation_dir, annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\", "numpy_points = np.array(points) return numpy_points def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1,", "and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or %s not exist\" % (annotation_path,", "'DontCare': yaw = -box_value['yaw'] # inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'],", "box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5) and (box[0] <= 1.5)", "point in pcl_points: x = point[0] y = point[1] z = point[2] if", "(box[1] >= 
-81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names", "= os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else:", "annotation_filename) pcd_path = os.path.join(pcd_dir, filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path,", "open(dataFilePath, 'r') as file: for line in file: if line.strip(): yield line.strip() return", "np import json # import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with", "= result['objects']['rect3DObject'] box_names = [] box_locs = [] for box_value in object_list: if", "= -box_value['yaw'] # inverse clockwise box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'],", "return result def show_annotations(info_path): cloud_and_label_list = get_image_and_label_list(info_path) print(\"remain number of infos:\", len(cloud_and_label_list)) for", "filename_and_post) # print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or", "box_value['centerY'], box_value['centerZ'], box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5) and (box[0] <=", "= os.path.split(train_path) pcd_dir = os.path.join(path, \"../pcds\") annotation_dir = os.path.join(path, \"../Annotations\") for filename_and_post in", "x = point[0] y = point[1] z = point[2] if abs(x) < 1e-1", "= np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file = open(annotation_path,", "return points def read_annotations_data(annotation_path): my_file = open(annotation_path, 
encoding='utf-8') result = json.load(my_file) object_list =", "\\ (box[1] >= -81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32)", "np.array(points) return numpy_points def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return", "return numpy_points def read_bin_points(bin_path): points = np.fromfile( str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points", ">= -81) and (box[1] <= 41): box_names.append(box_value['class'].strip()) box_locs.append(box) gt_boxes = np.array(box_locs).astype(np.float32) gt_names =", "= os.path.splitext(filename_and_post) annotation_filename = filename + annotation_post annotation_path = os.path.join(annotation_dir, annotation_filename) pcd_path =", "# print(pcd_path) if os.path.exists(annotation_path) and \\ os.path.exists(pcd_path): result.append((annotation_path, pcd_path)) else: print(\"%s or %s", "abs(y) < 1e-1 and abs(z) < 1e-1: continue points.append([point[0], point[1], point[2], point[3]]) numpy_points", "+ \"/.\") import math import numpy as np import json # import pcl", "import pcl from second.pytorch.show_3dbox import mayavi_show_3dbox def getFileData(dataFilePath): with open(dataFilePath, 'r') as file:", "and abs(y) < 1e-1 and abs(z) < 1e-1: continue points.append([point[0], point[1], point[2], point[3]])", "print(\"remain number of infos:\", len(cloud_and_label_list)) for annotation_path, pcd_path in cloud_and_label_list: print(pcd_path) points =", "str(bin_path), dtype=np.float32, count=-1).reshape([-1, 4]) return points def read_annotations_data(annotation_path): my_file = open(annotation_path, encoding='utf-8') result", "box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5) and (box[0] <= 1.5) and \\", "= point[2] if abs(x) < 1e-1 and abs(y) < 1e-1 and abs(z) <", "box = [box_value['centerX'], box_value['centerY'], box_value['centerZ'], 
box_value['width'], box_value['length'], box_value['height'], yaw] if (box[0] >= -1.5)", "= [] box_locs = [] for box_value in object_list: if box_value['class'].strip() != 'DontCare':" ]
[ "python # Relay node takes a list of topics and republish prepending /record", "the list of topics to relay from rosparam publishers=[] subscribers=[] # Manually list", "topics to Relay topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\"", "the topics to Relay topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for", "def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the message to a", "# Manually list the topics to Relay topics=['/emg'] for topic in topics: #relay", "class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg) sub = rospy.Subscriber(topic,", "a list of topics and republish prepending /record namespace import rospy import rostopic", "publishers=[] subscribers=[] # Manually list the topics to Relay topics=['/emg'] for topic in", "rospy.init_node('talker', anonymous=True) # Get the list of topics to relay from rosparam publishers=[]", "signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the message to a publisher ''' pub.publish(msg) rospy.init_node('talker',", "list of topics to relay from rosparam publishers=[] subscribers=[] # Manually list the", "import signal import sys QUEUE_SIZE=1000 #Make sure we don't miss points def signal_handler(sig,frame):", "#Make sure we don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg):", "to relay from rosparam publishers=[] subscribers=[] # Manually list the topics to Relay", "''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list of topics to relay from", "rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: 
echo(pub,msg) sub = rospy.Subscriber(topic, topicClass,callback) publishers.append(pub) subscribers.append(sub) rospy.spin()", "sure we don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): '''", "namespace import rospy import rostopic import signal import sys QUEUE_SIZE=1000 #Make sure we", "topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass,", "takes a list of topics and republish prepending /record namespace import rospy import", "republish prepending /record namespace import rospy import rostopic import signal import sys QUEUE_SIZE=1000", "Manually list the topics to Relay topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True)", "for \"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg)", "points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the message to", "miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the message", "to Relay topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with", "= rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg) sub = rospy.Subscriber(topic, topicClass,callback) publishers.append(pub) subscribers.append(sub)", "rospy import rostopic import signal import sys QUEUE_SIZE=1000 #Make sure we don't miss", "signal import sys QUEUE_SIZE=1000 #Make sure we don't miss points def signal_handler(sig,frame): 
print('Ctrl+c')", "with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg) sub =", "Relay topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class", "for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub", "publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list of topics to relay", "Relay node takes a list of topics and republish prepending /record namespace import", "and republish prepending /record namespace import rospy import rostopic import signal import sys", "# Get the list of topics to relay from rosparam publishers=[] subscribers=[] #", "anonymous=True) # Get the list of topics to relay from rosparam publishers=[] subscribers=[]", "print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the message to a publisher '''", "import rostopic import signal import sys QUEUE_SIZE=1000 #Make sure we don't miss points", "the message to a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list", "subscribers=[] # Manually list the topics to Relay topics=['/emg'] for topic in topics:", "list the topics to Relay topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay", "import sys QUEUE_SIZE=1000 #Make sure we don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0)", "#!/usr/bin/env python # Relay node takes a list of topics and republish prepending", "def echo(pub,msg): ''' echos the message to a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True)", "echo(pub,msg): ''' echos the message to a publisher ''' pub.publish(msg) rospy.init_node('talker', 
anonymous=True) #", "sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the message to a publisher ''' pub.publish(msg)", "relay from rosparam publishers=[] subscribers=[] # Manually list the topics to Relay topics=['/emg']", "don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos the", "Get the list of topics to relay from rosparam publishers=[] subscribers=[] # Manually", "of topics to relay from rosparam publishers=[] subscribers=[] # Manually list the topics", "rostopic import signal import sys QUEUE_SIZE=1000 #Make sure we don't miss points def", "of topics and republish prepending /record namespace import rospy import rostopic import signal", "sys QUEUE_SIZE=1000 #Make sure we don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler)", "in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName,", "echos the message to a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the", "\"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg) sub", "topics and republish prepending /record namespace import rospy import rostopic import signal import", "QUEUE_SIZE=1000 #Make sure we don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def", "rosparam publishers=[] subscribers=[] # Manually list the topics to Relay topics=['/emg'] for topic", "to a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list of topics", "signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def 
echo(pub,msg): ''' echos the message to a publisher", "topics to relay from rosparam publishers=[] subscribers=[] # Manually list the topics to", "list of topics and republish prepending /record namespace import rospy import rostopic import", "''' echos the message to a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get", "pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list of topics to relay from rosparam", "\"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg) sub = rospy.Subscriber(topic, topicClass,callback)", "prepending /record namespace import rospy import rostopic import signal import sys QUEUE_SIZE=1000 #Make", "message to a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list of", "from rosparam publishers=[] subscribers=[] # Manually list the topics to Relay topics=['/emg'] for", "pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg: echo(pub,msg) sub = rospy.Subscriber(topic, topicClass,callback) publishers.append(pub)", "topics=['/emg'] for topic in topics: #relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass))", "(topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda", "a publisher ''' pub.publish(msg) rospy.init_node('talker', anonymous=True) # Get the list of topics to", "# Relay node takes a list of topics and republish prepending /record namespace", "we don't miss points def signal_handler(sig,frame): print('Ctrl+c') sys.exit(0) signal.signal(signal.SIGINT,signal_handler) def echo(pub,msg): ''' echos", "topic in topics: #relay 
(topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub =", "/record namespace import rospy import rostopic import signal import sys QUEUE_SIZE=1000 #Make sure", "node takes a list of topics and republish prepending /record namespace import rospy", "#relay (topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True) print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE)", "import rospy import rostopic import signal import sys QUEUE_SIZE=1000 #Make sure we don't", "print(\"Relay for \"+topicName+\" with class \"+str(topicClass)) pub = rospy.Publisher(\"/record\"+topicName, topicClass, queue_size=QUEUE_SIZE) callback=lambda msg:" ]
[ "is None: return None for u in self._text_units: if u.regexp.search(unit): return u raise", "not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self, readable_num): match =", "unit): # pragma: no cover pass def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit)", ".error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units):", "\"\"\" .. codeauthor:: <NAME> <<EMAIL>> \"\"\" import abc import re from decimal import", "available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if readable_value is None: raise TypeError(\"readable_value must be", "self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number,", "@abc.abstractmethod def get_as(self, unit): # pragma: no cover pass def __init__(self, readable_value, default_unit=None):", "if from_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return", "unit) except TypeError: continue raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def", "pass @abc.abstractproperty def _units(self): # pragma: no cover pass @abc.abstractmethod def get_as(self, unit):", "import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return", "RealNumber, String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER =", "def _normalize_unit(self, unit): if unit is None: return None for u in self._text_units:", "if self._from_unit.name: items.append(self._from_unit.name) return \" 
\".join(items) def _normalize_unit(self, unit): if unit is None:", "import abc import re from decimal import Decimal from typepy import RealNumber, String", "re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta):", "items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self, unit): if", "return None for u in self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit not", "except TypeError: continue raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self,", "is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit)", "unit is None: return None for u in self._text_units: if u.regexp.search(unit): return u", "abc import re from decimal import Decimal from typepy import RealNumber, String from", "__repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self, unit):", "for u in self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit not found: {}\".format(unit))", "import RealNumber, String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER", "in self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return", "if readable_value is None: raise TypeError(\"readable_value must be a string\") number, from_unit =", "\".join([\", \".join(values) for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma:", "cover pass 
@abc.abstractproperty def _units(self): # pragma: no cover pass @abc.abstractmethod def get_as(self,", "def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self):", "if RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units),", "cover pass def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value)", "for unit in self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type():", "= self.__split_unit(readable_value) if number is not None: number = self.__to_number(number) if from_unit is", "self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if", "__preprocess(self, readable_value): if readable_value is None: raise TypeError(\"readable_value must be a string\") number,", "self._from_unit = self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \"", "if not String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\") for unit in self._units:", "if not RealNumber(number).is_type(): continue return (number, unit) except TypeError: continue raise UnitNotFoundError( \"unit", "self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name)", "\"\"\" import abc import re from decimal import Decimal from typepy import RealNumber,", "raise 
TypeError(\"readable_value must be a string\") number, from_unit = self.__split_unit(readable_value) if number is", "return (number, unit) except TypeError: continue raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units),", "from .error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def", "typepy import RealNumber, String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\")", "__split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError( \"unit not found\",", "unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number, unit) except TypeError: continue raise UnitNotFoundError(", "self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value):", "raise TypeError(\"readable_value must be a string\") for unit in self._units: try: if unit.regexp.search(readable_value):", "\"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self, readable_num): match", "# pragma: no cover pass @abc.abstractproperty def _units(self): # pragma: no cover pass", "text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma: no cover pass @abc.abstractproperty def", "if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number, unit) except", "number is not None: number = self.__to_number(number) if from_unit is None: raise UnitNotFoundError(", "readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value,", 
"continue raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if", "u in self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit not found: {}\".format(unit)) def", ") return (number, from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not match:", "import Decimal from typepy import RealNumber, String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS", "readable_value): if readable_value is None: raise TypeError(\"readable_value must be a string\") number, from_unit", "= [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self, unit): if unit", ".. codeauthor:: <NAME> <<EMAIL>> \"\"\" import abc import re from decimal import Decimal", "pass @abc.abstractmethod def get_as(self, unit): # pragma: no cover pass def __init__(self, readable_value,", "must be a string\") number, from_unit = self.__split_unit(readable_value) if number is not None:", "not found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None: raise", "not String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\") for unit in self._units: try:", "unit): if unit is None: return None for u in self._text_units: if u.regexp.search(unit):", "self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self, unit): if unit is None: return", "decimal import Decimal from typepy import RealNumber, String from .error import ParameterError, UnitNotFoundError", "UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self, readable_num):", "def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for values in 
text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty", "RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), )", "found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if readable_value is None: raise TypeError(\"readable_value", "number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number, unit) except TypeError: continue", "_normalize_unit(self, unit): if unit is None: return None for u in self._text_units: if", "def __repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self,", "None: return None for u in self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit", "number = self.__to_number(number) if from_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value,", "value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if readable_value is None: raise TypeError(\"readable_value must", "readable_value is None: raise TypeError(\"readable_value must be a string\") number, from_unit = self.__split_unit(readable_value)", "@abc.abstractproperty def _units(self): # pragma: no cover pass @abc.abstractmethod def get_as(self, unit): #", "TypeError(\"readable_value must be a string\") number, from_unit = self.__split_unit(readable_value) if number is not", "self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def", "in self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self,", "codeauthor:: <NAME> <<EMAIL>> \"\"\" 
import abc import re from decimal import Decimal from", "<<EMAIL>> \"\"\" import abc import re from decimal import Decimal from typepy import", "raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is", "no cover pass @abc.abstractmethod def get_as(self, unit): # pragma: no cover pass def", "_RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for values in text_units.values()])", "_BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values)", "unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number, unit) except TypeError:", "HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma: no cover pass @abc.abstractproperty def _units(self): #", "pass def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def", "found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError(", "found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value must", "pragma: no cover pass @abc.abstractmethod def get_as(self, unit): # pragma: no cover pass", "default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)]", "string\") number, from_unit = self.__split_unit(readable_value) if number is not None: number = 
self.__to_number(number)", "not match: raise ParameterError( \"human-readable value should only include a number\", value=readable_num )", "UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\",", "value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if", "= self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if self._from_unit.name:", "not RealNumber(number).is_type(): continue return (number, unit) except TypeError: continue raise UnitNotFoundError( \"unit not", "raise ParameterError( \"human-readable value should only include a number\", value=readable_num ) return Decimal(match.group())", "(number, unit) except TypeError: continue raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), )", "def __preprocess(self, readable_value): if readable_value is None: raise TypeError(\"readable_value must be a string\")", "= self.__to_number(number) if from_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units),", "readable_num): match = _RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable value should only", "<NAME> <<EMAIL>> \"\"\" import abc import re from decimal import Decimal from typepy", "if unit is None: return None for u in self._text_units: if u.regexp.search(unit): return", "no cover pass @abc.abstractproperty def _units(self): # pragma: no cover pass @abc.abstractmethod def", "return u raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if", ") return (readable_value, self._default_unit) if not 
String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\")", "be a string\") for unit in self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0]", "from typepy import RealNumber, String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\",", "for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma: no cover", "UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if readable_value is", "String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\")", "_units(self): # pragma: no cover pass @abc.abstractmethod def get_as(self, unit): # pragma: no", "a string\") number, from_unit = self.__split_unit(readable_value) if number is not None: number =", "available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not", "= _RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable value should only include a", "cover pass @abc.abstractmethod def get_as(self, unit): # pragma: no cover pass def __init__(self,", "pragma: no cover pass @abc.abstractproperty def _units(self): # pragma: no cover pass @abc.abstractmethod", "available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value must be a", "match = _RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable value should only include", "self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return", 
"items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self, unit): if unit is None: return None", "re from decimal import Decimal from typepy import RealNumber, String from .error import", "not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value", "value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value must be", "self.__to_number(number) if from_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), )", "(readable_value, self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\") for unit", "_get_unit_msg(text_units): return \", \".join([\", \".join(values) for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def", "def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError( \"unit not", "# pragma: no cover pass @abc.abstractmethod def get_as(self, unit): # pragma: no cover", "= (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for", "u raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit", "if self._default_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return", "raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if 
readable_value", "is None: raise TypeError(\"readable_value must be a string\") number, from_unit = self.__split_unit(readable_value) if", "[str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items) def _normalize_unit(self, unit): if unit is", "= re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for values in text_units.values()]) class", "RealNumber(number).is_type(): continue return (number, unit) except TypeError: continue raise UnitNotFoundError( \"unit not found\",", "None: number = self.__to_number(number) if from_unit is None: raise UnitNotFoundError( \"unit not found\",", "\", \".join([\", \".join(values) for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): #", "is not None: number = self.__to_number(number) if from_unit is None: raise UnitNotFoundError( \"unit", "in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma: no cover pass @abc.abstractproperty", "return \", \".join([\", \".join(values) for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self):", "\".join(values) for values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma: no", "\"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not String(readable_value).is_type(): raise", "if not match: raise ParameterError( \"human-readable value should only include a number\", value=readable_num", "from decimal import Decimal from typepy import RealNumber, String from .error import ParameterError,", "self._default_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, 
available_units=_get_unit_msg(self._text_units), ) return (readable_value,", "found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num)", "return (readable_value, self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\") for", "__to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable value should", "return \" \".join(items) def _normalize_unit(self, unit): if unit is None: return None for", "from_unit is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number,", "ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None:", "string\") for unit in self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not", "= unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number, unit) except TypeError: continue raise", "no cover pass def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit =", "def get_as(self, unit): # pragma: no cover pass def __init__(self, readable_value, default_unit=None): self._default_unit", "ParameterError, UnitNotFoundError _BASE_ATTRS = (\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \",", "self._default_unit) if not String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\") for unit in", "a string\") for unit in self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if", "get_as(self, unit): # pragma: no cover 
pass def __init__(self, readable_value, default_unit=None): self._default_unit =", "def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable value", "class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty def _text_units(self): # pragma: no cover pass @abc.abstractproperty def _units(self):", "readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items =", "{}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type(): if self._default_unit is None: raise UnitNotFoundError( \"unit", "not None: number = self.__to_number(number) if from_unit is None: raise UnitNotFoundError( \"unit not", "_RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable value should only include a number\",", "def _units(self): # pragma: no cover pass @abc.abstractmethod def get_as(self, unit): # pragma:", "None for u in self._text_units: if u.regexp.search(unit): return u raise ValueError(\"unit not found:", "None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if", "pragma: no cover pass def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit", "(\"name\", \"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for values", "\".join(items) def _normalize_unit(self, unit): if unit is None: return None for u in", "be a string\") number, from_unit = self.__split_unit(readable_value) if number is not None: number", "TypeError: continue raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) 
def __preprocess(self, readable_value):", ") def __preprocess(self, readable_value): if readable_value is None: raise TypeError(\"readable_value must be a", "None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def", "Decimal from typepy import RealNumber, String from .error import ParameterError, UnitNotFoundError _BASE_ATTRS =", "_text_units(self): # pragma: no cover pass @abc.abstractproperty def _units(self): # pragma: no cover", "if u.regexp.search(unit): return u raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value): if", "\"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if readable_value is None:", "is None: raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit)", "continue return (number, unit) except TypeError: continue raise UnitNotFoundError( \"unit not found\", value=readable_value,", "= self.__preprocess(readable_value) def __repr__(self): items = [str(self._number)] if self._from_unit.name: items.append(self._from_unit.name) return \" \".join(items)", "raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (number, from_unit) def __to_number(self,", "return (number, from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not match: raise", "from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not match: raise ParameterError( \"human-readable", "must be a string\") for unit in self._units: try: if unit.regexp.search(readable_value): number =", "# pragma: no cover pass def __init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) 
self._number,", "if number is not None: number = self.__to_number(number) if from_unit is None: raise", "\" \".join(items) def _normalize_unit(self, unit): if unit is None: return None for u", "@abc.abstractproperty def _text_units(self): # pragma: no cover pass @abc.abstractproperty def _units(self): # pragma:", "try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue return (number, unit)", "__init__(self, readable_value, default_unit=None): self._default_unit = self._normalize_unit(default_unit) self._number, self._from_unit = self.__preprocess(readable_value) def __repr__(self): items", "unit in self._units: try: if unit.regexp.search(readable_value): number = unit.regexp.split(readable_value)[0] if not RealNumber(number).is_type(): continue", "String(readable_value).is_type(): raise TypeError(\"readable_value must be a string\") for unit in self._units: try: if", "match: raise ParameterError( \"human-readable value should only include a number\", value=readable_num ) return", "import re from decimal import Decimal from typepy import RealNumber, String from .error", "raise UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not", "UnitNotFoundError( \"unit not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) return (readable_value, self._default_unit) if not String(readable_value).is_type():", "def _text_units(self): # pragma: no cover pass @abc.abstractproperty def _units(self): # pragma: no", "self.__split_unit(readable_value) if number is not None: number = self.__to_number(number) if from_unit is None:", "(number, from_unit) def __to_number(self, readable_num): match = _RE_NUMBER.search(readable_num) if not match: raise ParameterError(", "values in text_units.values()]) class HumanReadableValue(metaclass=abc.ABCMeta): @abc.abstractproperty 
def _text_units(self): # pragma: no cover pass", "u.regexp.search(unit): return u raise ValueError(\"unit not found: {}\".format(unit)) def __split_unit(self, readable_value): if RealNumber(readable_value).is_type():", "not found\", value=readable_value, available_units=_get_unit_msg(self._text_units), ) def __preprocess(self, readable_value): if readable_value is None: raise", "TypeError(\"readable_value must be a string\") for unit in self._units: try: if unit.regexp.search(readable_value): number", "\"regexp\") _RE_NUMBER = re.compile(r\"^[-\\+]?[0-9\\.]+$\") def _get_unit_msg(text_units): return \", \".join([\", \".join(values) for values in", "number, from_unit = self.__split_unit(readable_value) if number is not None: number = self.__to_number(number) if", "None: raise TypeError(\"readable_value must be a string\") number, from_unit = self.__split_unit(readable_value) if number", "from_unit = self.__split_unit(readable_value) if number is not None: number = self.__to_number(number) if from_unit" ]
[ "import show_link from dms.utils_form import get_item_vars_show from dms.views_comment import item_comment from dms.file.utils import", "= get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container),", "return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent() if", "\"\"\" zeigt den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen", "= item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = '' return render_to_response", "= item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] =", "import item_comment from dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension import", "_ from dms.utils import show_link from dms.utils_form import get_item_vars_show from dms.views_comment import item_comment", "= get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1", "dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension import * # dms-Funktionen", "angepasst werden. 
0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\"", "show_link from dms.utils_form import get_item_vars_show from dms.views_comment import item_comment from dms.file.utils import get_file_url", "dms.utils_form import get_item_vars_show from dms.views_comment import item_comment from dms.file.utils import get_file_url from dms.gallery.utils", "vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = '' return render_to_response ( 'base-full-width.html',", "from django.template import Context from django.utils.translation import ugettext as _ from dms.utils import", "import get_exibition_url from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container):", "Inhalt eines Photos an Django content Management System <NAME> <EMAIL> Die Programme des", "parent = item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments = ''", "from dms.gallery.utils import get_exibition_url from dms_ext.extension import * # dms-Funktionen ueberschreiben # -----------------------------------------------------", "django.template import Context from django.utils.translation import ugettext as _ from dms.utils import show_link", "True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container)", "from dms.utils_form import get_item_vars_show from dms.views_comment import item_comment from dms.file.utils import get_file_url from", "= '' vars = get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container)", "file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name", 
"Bildes \"\"\" from django.shortcuts import render_to_response from django.template.loader import get_template from django.template import", "django.shortcuts import render_to_response from django.template.loader import get_template from django.template import Context from django.utils.translation", "from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt", "'' vars = get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big']", "# ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container):", "----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\"", "'photo' parent = item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments =", "Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt", "import get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension import * # dms-Funktionen ueberschreiben", "normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle'", "+ '_middle' + file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent() if parent.item.has_comments: comments", "import ugettext as _ from dms.utils import show_link from dms.utils_form import get_item_vars_show from", "des Bildes \"\"\" from django.shortcuts import render_to_response from django.template.loader import get_template from django.template", "get_template('app/photo/show_photo.html') vars['full_name'] = 
item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name", "import render_to_response from django.template.loader import get_template from django.template import Context from django.utils.translation import", "<filename>photo/views_show.py # -*- coding: utf-8 -*- \"\"\" /dms/photo/views_show.py .. zeigt den Inhalt eines", "Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden.", "= show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2", "comments = item_comment(request, item_container=item_container) else: comments = '' vars = get_item_vars_show(request, item_container, app_name)", "Anzeige des Bildes \"\"\" from django.shortcuts import render_to_response from django.template.loader import get_template from", "import get_item_vars_show from dms.views_comment import item_comment from dms.file.utils import get_file_url from dms.gallery.utils import", "* # dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines", "Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts import render_to_response from django.template.loader", "\"\"\" from django.shortcuts import render_to_response from django.template.loader import get_template from django.template import Context", "werden. 0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\" from", "vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] =", "# -*- coding: utf-8 -*- \"\"\" /dms/photo/views_show.py .. 
zeigt den Inhalt eines Photos", "dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 29.10.2007", "= file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name = 'photo' parent =", "vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text']", "den Inhalt eines Photos an Django content Management System <NAME> <EMAIL> Die Programme", "der normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] +", "vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more']", "den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007", "ugettext as _ from dms.utils import show_link from dms.utils_form import get_item_vars_show from dms.views_comment", "file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent() if parent.item.has_comments:", "import Context from django.utils.translation import ugettext as _ from dms.utils import show_link from", "Bilder \"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle' +", "<EMAIL> Die Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend", "= item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] =", "= 'photo' parent = item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments", "from 
dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension import * #", "dms.views_comment import item_comment from dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension", "Beduerfnissen entsprechend angepasst werden. 0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige des", "as _ from dms.utils import show_link from dms.utils_form import get_item_vars_show from dms.views_comment import", "item_comment(request, item_container=item_container) else: comments = '' vars = get_item_vars_show(request, item_container, app_name) vars['comments'] =", "System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und den spezifischen", "vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = '' return", "29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts import", "photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die", "= get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] =", "get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'),", "item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = '' return render_to_response (", "Context from django.utils.translation import ugettext as _ from dms.utils import show_link from dms.utils_form", "Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen Bilder \"\"\" file_name", "else: comments = '' vars = 
get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url']", "get_template from django.template import Context from django.utils.translation import ugettext as _ from dms.utils", "Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und den", "get_item_vars_show from dms.views_comment import item_comment from dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url", "_(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] =", "= get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] =", "dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos \"\"\"", "ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos \"\"\" def", "def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert", "/dms/photo/views_show.py .. 
zeigt den Inhalt eines Photos an Django content Management System <NAME>", "vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem =", "item_container=item_container) else: comments = '' vars = get_item_vars_show(request, item_container, app_name) vars['comments'] = comments", "item_container, app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True)", "parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments = '' vars = get_item_vars_show(request, item_container,", "dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den", "file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent()", "file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container)", "zeigt den Inhalt eines Photos an Django content Management System <NAME> <EMAIL> Die", "= item_comment(request, item_container=item_container) else: comments = '' vars = get_item_vars_show(request, item_container, app_name) vars['comments']", "-*- coding: utf-8 -*- \"\"\" /dms/photo/views_show.py .. 
zeigt den Inhalt eines Photos an", "den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen", "31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts import render_to_response from django.template.loader import get_template", "from django.template.loader import get_template from django.template import Context from django.utils.translation import ugettext as", "get_exibition_url from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\"", "zeigt den Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der", "= tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = '' return render_to_response ( 'base-full-width.html', vars", "app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem", "show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url']", "comments = '' vars = get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url'] =", "\"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen Bilder \"\"\" file_name =", "tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = '' return render_to_response ( 'base-full-width.html', vars )", "from dms.utils import show_link from dms.utils_form import get_item_vars_show from dms.views_comment import item_comment from", "dms.utils import show_link from dms.utils_form import get_item_vars_show from dms.views_comment import item_comment from dms.file.utils", "coding: utf-8 -*- \"\"\" /dms/photo/views_show.py .. 
zeigt den Inhalt eines Photos an Django", "render_to_response from django.template.loader import get_template from django.template import Context from django.utils.translation import ugettext", "<NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen", "eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen Bilder \"\"\"", "Namen der normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos]", "+ file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent() if parent.item.has_comments: comments = item_comment(request,", "comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name']", "tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name']", "koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 29.10.2007 Beginn", "item_comment from dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension import *", "import get_template from django.template import Context from django.utils.translation import ugettext as _ from", "from django.utils.translation import ugettext as _ from dms.utils import show_link from dms.utils_form import", "spezifischen Beduerfnissen entsprechend angepasst werden. 
0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige", "Die Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst", "= get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name =", "eines Photos an Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems", "get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email']", "django.utils.translation import ugettext as _ from dms.utils import show_link from dms.utils_form import get_item_vars_show", "die Namen der normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return", "der Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts import render_to_response from", "0.02 31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts import render_to_response from django.template.loader import", "if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments = '' vars = get_item_vars_show(request,", "-*- \"\"\" /dms/photo/views_show.py .. zeigt den Inhalt eines Photos an Django content Management", "Photos an Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen", ".. 
zeigt den Inhalt eines Photos an Django content Management System <NAME> <EMAIL>", "Inhalt eines Photos \"\"\" def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen Bilder", "get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos", "item_container.item.string_1 vars['email'] = item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars))", "dms.gallery.utils import get_exibition_url from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def", "= comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html')", "frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 29.10.2007 Beginn der", "\"\"\" /dms/photo/views_show.py .. zeigt den Inhalt eines Photos an Django content Management System", "vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url']", "\"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:]", "Beginn der Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts import render_to_response", "und den spezifischen Beduerfnissen entsprechend angepasst werden. 
0.01 29.10.2007 Beginn der Arbeit 0.02", "from django.shortcuts import render_to_response from django.template.loader import get_template from django.template import Context from", "vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True) tItem = get_template('app/photo/show_photo.html') vars['full_name'] = item_container.item.string_1 vars['email'] =", "get_file_url(item_container) ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name = 'photo'", "0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige des Bildes \"\"\" from django.shortcuts", "utf-8 -*- \"\"\" /dms/photo/views_show.py .. zeigt den Inhalt eines Photos an Django content", "vars = get_item_vars_show(request, item_container, app_name) vars['comments'] = comments vars['image_url'] = get_photo_name_middle(item_container) vars['image_url_big'] =", "from dms.views_comment import item_comment from dms.file.utils import get_file_url from dms.gallery.utils import get_exibition_url from", "des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 
0.01", "def get_photo_name_middle(item_container): \"\"\" ..liefert die Namen der normalen Bilder \"\"\" file_name = get_file_url(item_container)", "..liefert die Namen der normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos = file_name.rfind('.')", "'_middle' + file_name[ext_pos:] app_name = 'photo' parent = item_container.get_parent() if parent.item.has_comments: comments =", "an Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei", "get_file_url from dms.gallery.utils import get_exibition_url from dms_ext.extension import * # dms-Funktionen ueberschreiben #", "item_container.item.string_2 vars['exibition_url'] = get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = ''", "app_name = 'photo' parent = item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else:", "# dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt eines Photos", "get_exibition_url(item_container) vars['name'] = item_container.item.name vars['text_more'] = tItem.render(Context(vars)) vars['text'] = '' vars['image_url'] = ''", "ext_pos = file_name.rfind('.') return file_name[:ext_pos] + '_middle' + file_name[ext_pos:] app_name = 'photo' parent", "\"\"\" ..liefert die Namen der normalen Bilder \"\"\" file_name = get_file_url(item_container) ext_pos =", "import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def photo_show(request,item_container): \"\"\" zeigt den Inhalt", "django.template.loader import get_template from django.template import Context from django.utils.translation import ugettext as _", "entsprechend angepasst werden. 
0.01 29.10.2007 Beginn der Arbeit 0.02 31.10.2007 Anzeige des Bildes", "content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und", "= item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments = '' vars", "item_container.get_parent() if parent.item.has_comments: comments = item_comment(request, item_container=item_container) else: comments = '' vars =", "genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 29.10.2007 Beginn der Arbeit" ]
[ "in range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated = True, color = 'red')", "time from matplotlib import animation def animate(U, timeSteps: int, postionSteps: int, timeStepSize: float):", "animation.ArtistAnimation(fig, ims, interval = (10), blit = True, repeat_delay = 500) plt.show() #animation(u,", "from matplotlib import animation def animate(U, timeSteps: int, postionSteps: int, timeStepSize: float): fig=", "animation def animate(U, timeSteps: int, postionSteps: int, timeStepSize: float): fig= plt.figure() ims =", "im = plt.plot(postionSteps, U[:,i] , animated = True, color = 'red') ims.append(im) ani", "ani = animation.ArtistAnimation(fig, ims, interval = (10), blit = True, repeat_delay = 500)", "plt.plot(postionSteps, U[:,i] , animated = True, color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig,", "= (10), blit = True, repeat_delay = 500) plt.show() #animation(u, trial.k_N, trial.x_range, trial.k)", "plt import time from matplotlib import animation def animate(U, timeSteps: int, postionSteps: int,", "as plt import time from matplotlib import animation def animate(U, timeSteps: int, postionSteps:", "matplotlib import animation def animate(U, timeSteps: int, postionSteps: int, timeStepSize: float): fig= plt.figure()", "animate(U, timeSteps: int, postionSteps: int, timeStepSize: float): fig= plt.figure() ims = [] for", "= [] for i in range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated =", "ims, interval = (10), blit = True, repeat_delay = 500) plt.show() #animation(u, trial.k_N,", "animated = True, color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval =", "U[:,i] , animated = True, color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims,", "= True, color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval = (10),", "= plt.plot(postionSteps, U[:,i] , animated = True, color = 'red') ims.append(im) ani =", "def animate(U, timeSteps: int, postionSteps: int, 
timeStepSize: float): fig= plt.figure() ims = []", "[] for i in range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated = True,", "timeSteps: int, postionSteps: int, timeStepSize: float): fig= plt.figure() ims = [] for i", "import matplotlib.pyplot as plt import time from matplotlib import animation def animate(U, timeSteps:", "interval = (10), blit = True, repeat_delay = 500) plt.show() #animation(u, trial.k_N, trial.x_range,", "ims = [] for i in range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated", "timeStepSize: float): fig= plt.figure() ims = [] for i in range(timeSteps): im =", "True, color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval = (10), blit", "int, postionSteps: int, timeStepSize: float): fig= plt.figure() ims = [] for i in", ", animated = True, color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval", "int, timeStepSize: float): fig= plt.figure() ims = [] for i in range(timeSteps): im", "plt.figure() ims = [] for i in range(timeSteps): im = plt.plot(postionSteps, U[:,i] ,", "import animation def animate(U, timeSteps: int, postionSteps: int, timeStepSize: float): fig= plt.figure() ims", "float): fig= plt.figure() ims = [] for i in range(timeSteps): im = plt.plot(postionSteps,", "range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated = True, color = 'red') ims.append(im)", "postionSteps: int, timeStepSize: float): fig= plt.figure() ims = [] for i in range(timeSteps):", "color = 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval = (10), blit =", "= animation.ArtistAnimation(fig, ims, interval = (10), blit = True, repeat_delay = 500) plt.show()", "'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval = (10), blit = True, repeat_delay", "matplotlib.pyplot as plt import time from matplotlib import animation def animate(U, timeSteps: int,", "i in range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated = True, color =", "for i in 
range(timeSteps): im = plt.plot(postionSteps, U[:,i] , animated = True, color", "= 'red') ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval = (10), blit = True,", "fig= plt.figure() ims = [] for i in range(timeSteps): im = plt.plot(postionSteps, U[:,i]", "import time from matplotlib import animation def animate(U, timeSteps: int, postionSteps: int, timeStepSize:", "ims.append(im) ani = animation.ArtistAnimation(fig, ims, interval = (10), blit = True, repeat_delay =" ]
[ "Account, Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self):", "Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions = load_test_positions(self.account_id)", "position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0 for", "TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected", "self.assertEqual(result, expected) def test_get_cash(self): expected = 0 for account in self.portfolio.list_accounts(): expected +=", "for account in self.portfolio.list_accounts(): positions = load_test_positions(account) for position in positions: if position['symbol']", "expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal(", "expected) def test_get_all_positions(self): expected = {} for account in self.portfolio.list_accounts(): positions = load_test_positions(account)", "self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected", "for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result,", "self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' 
self.account = Account(load_test_balance(self.account_id),", "self.assertEqual(result, expected) def test_get_all_positions(self): expected = {} for account in self.portfolio.list_accounts(): positions =", "for position in positions: if position['symbol'] not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(", "'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected = 0 for account", "not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize(", "result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0 for account in", "setUp(self): self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id)", "'222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected = {} for account in self.portfolio.list_accounts(): positions", "positions = load_test_positions(self.account_id) expected = {} for position in positions: expected[position['symbol']] = Decimal(", "= self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00'))", "load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result, expected) if __name__ ==", "{} for account in self.portfolio.list_accounts(): positions = load_test_positions(account) for position in positions: if", "self.portfolio.list_accounts(): positions = 
load_test_positions(account) for position in positions: if position['symbol'] not in expected:", "= self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected = {} for", "def setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts()", "result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize(", "= Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result, expected) if __name__ == '__main__': unittest.main()", "import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator())", "position['symbol'] not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal(", "class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def", "self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings()", "= Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions()", "decimal import 
Decimal from Broker import Broker from Portfolio import Account, Portfolio from", "load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio =", "in self.portfolio.list_accounts(): positions = load_test_positions(account) for position in positions: if position['symbol'] not in", "def test_get_cash(self): expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][", "Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id", "+= Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self):", "positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance", "expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result", "load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected", "'111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id) result = self.account.get_balance()", "self.portfolio.list_accounts(): 
expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self):", "def test_get_total_holdings(self): expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][", "class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self):", "account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected)", "load_test_positions(self.account_id) expected = {} for position in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result", "test_get_cash(self): balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result, expected)", "expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result", "= Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id)", "= load_test_positions(account) for position in positions: if position['symbol'] not in expected: expected[position['symbol']] =", "expected = {} for position in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result =", "for position in positions: expected[position['symbol']] = Decimal( 
position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected)", "in positions: if position['symbol'] not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else:", "test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self):", "expected) def test_get_cash(self): expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal(", "self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected = {} for", "def setUp(self): self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected =", "Broker from Portfolio import Account, Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions", "load_test_positions(account) for position in positions: if position['symbol'] not in expected: expected[position['symbol']] = Decimal(", "expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def", "self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result,", "test_get_balance(self): expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected =", "\\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() 
self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker)", "expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result", "self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result,", "= ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected = {} for account in", "from decimal import Decimal from Broker import Broker from Portfolio import Account, Portfolio", "else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self):", "def test_get_all_positions(self): expected = {} for account in self.portfolio.list_accounts(): positions = load_test_positions(account) for", "0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings()", "{} for position in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result,", "Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result,", "Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected", "account 
in self.portfolio.list_accounts(): positions = load_test_positions(account) for position in positions: if position['symbol'] not", "MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio", "self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected = 0 for account in self.portfolio.list_accounts(): expected", "load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self):", "= Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111', '222222']", "result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected = {}", "self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111',", "expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected", "+= Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected =", "self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id))", "self.account.get_total_holdings() self.assertEqual(result, 
expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected = {} for position", "in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self):", "expected) def test_get_total_holdings(self): expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal(", "expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance =", "self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected)", "Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0 for account", "self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected =", "Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected", "in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class", "'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111'", "0 for account in self.portfolio.list_accounts(): expected += Decimal( 
load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash()", "= {} for position in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions()", "= self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result =", "expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result =", "self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result =", "position in positions: if position['symbol'] not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00'))", "in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def", "= self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected = 0 for account in self.portfolio.list_accounts():", "load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id =", "Broker import Broker from Portfolio import Account, Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance,", "TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() 
self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self):", "load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts()", "expected = ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected = {} for account", "test_get_positions(self): positions = load_test_positions(self.account_id) expected = {} for position in positions: expected[position['symbol']] =", "Portfolio import Account, Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase):", "position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected =", "result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' self.account", "import Decimal from Broker import Broker from Portfolio import Account, Portfolio from tests.MockAuthenticator", "result = self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected =", "tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker = Broker()", "unittest from decimal import Decimal from Broker import Broker from Portfolio import Account,", "def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def", 
"from Broker import Broker from Portfolio import Account, Portfolio from tests.MockAuthenticator import MockAuthenticator,", "= Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions =", "= load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][", "account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected)", "from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker =", "positions = load_test_positions(account) for position in positions: if position['symbol'] not in expected: expected[position['symbol']]", "expected = {} for account in self.portfolio.list_accounts(): positions = load_test_positions(account) for position in", "result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected = 0 for account in", "Decimal from Broker import Broker from Portfolio import Account, Portfolio from tests.MockAuthenticator import", "['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected = {} for account in self.portfolio.list_accounts():", "Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected = 0", "expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected = 
Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash()", "= load_test_positions(self.account_id) expected = {} for position in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00'))", "from Portfolio import Account, Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class", "if position['symbol'] not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] +=", "= 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result =", "expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected", "in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00'))", "positions: if position['symbol'] not in expected: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']]", "= '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id) result =", "self.assertEqual(result, expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected = {} for position in", "def test_get_cash(self): balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result,", 
"load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected = 0 for", "Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0", "test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self):", "= {} for account in self.portfolio.list_accounts(): positions = load_test_positions(account) for position in positions:", "self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0 for account in self.portfolio.list_accounts(): expected", "setUp(self): self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result", "self.account_id = '111111' self.account = Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id) result", "+= Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result, expected) def test_get_cash(self): expected =", "import Account, Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def", "self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0 for account in self.portfolio.list_accounts(): expected +=", "test_get_all_positions(self): expected = {} 
for account in self.portfolio.list_accounts(): positions = load_test_positions(account) for position", "= Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts()", "= self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def setUp(self): self.account_id = '111111' self.account =", "= self.portfolio.get_all_positions() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = 0 for account in self.portfolio.list_accounts():", "Portfolio from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\ load_test_positions class TestPortfolio(unittest.TestCase): def setUp(self): self.broker", "expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase): def", "for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.portfolio.get_total_holdings() self.assertEqual(result,", "Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result,", "position in positions: expected[position['symbol']] = Decimal( position['currentMarketValue']).quantize(Decimal('0.00')) result = self.account.get_positions() self.assertEqual(result, expected) def", "self.broker = Broker() self.broker.set_authenticator(MockAuthenticator()) self.portfolio = Portfolio(self.broker) self.portfolio.load_accounts() def test_load_accounts(self): self.portfolio.load_accounts() result =", "= 
self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected) def test_get_all_positions(self): expected = {}", "test_get_cash(self): expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00'))", "position['currentMarketValue']).quantize( Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected)", "expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected = {} for position in positions:", "self.account.get_positions() self.assertEqual(result, expected) def test_get_cash(self): balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result", "test_get_total_holdings(self): expected = 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00'))", "def test_get_balance(self): expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected", "def test_load_accounts(self): self.portfolio.load_accounts() result = self.portfolio.list_accounts() expected = ['111111', '222222'] self.assertEqual(result, expected) def", "def test_get_positions(self): positions = load_test_positions(self.account_id) expected = {} for position in positions: expected[position['symbol']]", "expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected)", "import Broker from Portfolio import Account, Portfolio from 
tests.MockAuthenticator import MockAuthenticator, load_test_balance, \\", "= Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected)", "'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions = load_test_positions(self.account_id) expected =", "= 0 for account in self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result =", "load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00'))", "= load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result, expected) if __name__", "expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result, expected) if __name__ == '__main__':", "result = self.account.get_balance() self.assertEqual(result, expected) def test_get_total_holdings(self): expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result", "balance = load_test_balance(self.account_id) expected = Decimal(balance['combinedBalances'][0]['cash']).quantize( Decimal('0.00')) result = self.account.get_cash() self.assertEqual(result, expected) if", "import unittest from decimal import Decimal from Broker import Broker from Portfolio import", "Account(load_test_balance(self.account_id), load_test_positions(self.account_id)) def test_get_balance(self): 
expected = load_test_balance(self.account_id) result = self.account.get_balance() self.assertEqual(result, expected) def", "self.portfolio.list_accounts(): expected += Decimal( load_test_balance(account)['combinedBalances'][0][ 'cash']).quantize(Decimal('0.00')) result = self.portfolio.get_cash() self.assertEqual(result, expected) class TestAccount(unittest.TestCase):", "expected = Decimal( load_test_balance(self.account_id)['combinedBalances'][0][ 'marketValue']).quantize(Decimal('0.00')) result = self.account.get_total_holdings() self.assertEqual(result, expected) def test_get_positions(self): positions", "Decimal('0.00')) else: expected[position['symbol']] += Decimal( position['currentMarketValue']).quantize( Decimal('0.00')) result = self.portfolio.get_all_positions() self.assertEqual(result, expected) def" ]
[ "TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()):", "step_two = mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps:", "'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once()", "self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps", "self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one", "mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config", "as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once()", "baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one", "from tests.unit.baskerville_tests.helpers.spark_testing_base import 
\\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self):", "self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask(", "from unittest import mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark", "= test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task", "source tree. from unittest import mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import", "self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize()", "step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one, step_two]", "steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config, steps ) def test_initialize(self):", "super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import", "mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' )", "[step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: 
step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0)", "<reponame>deflect-ca/baskerville<gh_stars>1-10 # Copyright (c) 2020, eQualit.ie inc. # All rights reserved. # #", "steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config, steps ) def test_initialize(self):", "test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task =", "LICENSE file in the root directory of this source tree. from unittest import", "def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config, steps )", "def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config, steps )", "self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask", "= mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize()", "assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config =", "import Task self.task = Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one =", "_helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config, steps ) def", "= mock.MagicMock() self.task.steps = [step_one, 
step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service:", "TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()):", "baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class", "class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self,", "mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once()", "license found in the # LICENSE file in the root directory of this", "as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config", "class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self,", "def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class", "class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self,", "This source code is licensed under the BSD-style license found in the #", 
"eQualit.ie inc. # All rights reserved. # # This source code is licensed", ") as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def", "self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up()", "mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once()", "2020, eQualit.ie inc. # All rights reserved. # # This source code is", "self.task = CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two", "# All rights reserved. # # This source code is licensed under the", "mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once()", "rights reserved. # # This source code is licensed under the BSD-style license", "the # LICENSE file in the root directory of this source tree. 
from", "self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class", "is licensed under the BSD-style license found in the # LICENSE file in", "self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] with mock.patch.object(", "found in the # LICENSE file in the root directory of this source", "self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock()", "mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in", "self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset()", "self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once() self.task.service_provider.initialize_request_set_cache_service. 
\\ assert_called_once() self.task.service_provider.initalize_ml_services.assert_called_once() step_one.initialize.assert_called_once()", "BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config, steps", "as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self):", "= test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task", "MLTask self.task = MLTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock()", "MLTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock()", "= BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config,", "self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider,", "self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up:", "self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( 
self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with", "setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base", "Task self.task = Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock()", "= Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two =", "self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf", "mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' )", "step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for", "steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config, steps ) def test_initialize(self):", "as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service'", "import CacheTask self.task = CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one =", "self.task = MLTask( self.baskerville_config, steps ) def 
test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two", "test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task =", "mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps)", "for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with", "= mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step", "mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config =", "0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once()", "from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up()", "# This source code is licensed under the BSD-style license found in the", "'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object(", "= BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config,", "step_two] 
with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' )", "mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps", "mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp()", "with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self):", "reserved. # # This source code is licensed under the BSD-style license found", "mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once()", "# LICENSE file in the root directory of this source tree. 
from unittest", "mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once()", "test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task =", ") as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once()", "BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config, steps", "= mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: step.set_df.assert_called_once()", "as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\", "unittest import mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from", "step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock()", "as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( 
self.task.service_provider, 'reset' ) as", "step_two = mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once()", "self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once() self.task.service_provider.initialize_request_set_cache_service.", "step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def", "Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock()", "of this source tree. from unittest import mock from baskerville.models.config import BaskervilleConfig from", "mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf =", "in the # LICENSE file in the root directory of this source tree.", "directory of this source tree. from unittest import mock from baskerville.models.config import BaskervilleConfig", "(c) 2020, eQualit.ie inc. # All rights reserved. 
# # This source code", "step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def", "steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps =", "self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self):", "test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self):", "'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark):", "= [step_one, step_two] self._helper_task_set_up(mock_steps) self.task.run() for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) ==", "\\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf =", "import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate()", "= MLTask( self.baskerville_config, steps ) def test_initialize(self): 
self._helper_task_set_up() step_one = mock.MagicMock() step_two =", "mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service:", "mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self):", "import MLTask self.task = MLTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one =", "tree. from unittest import mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\", "self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task(", "step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one,", "BSD-style license found in the # LICENSE file in the root directory of", "def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two]", "mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def", "this source tree. 
from unittest import mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base", "mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object(", "[step_one, step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service'", "in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider,", "step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' )", "in the root directory of this source tree. 
from unittest import mock from", "tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config =", "mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\", "_helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config, steps ) def", "mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import", "source code is licensed under the BSD-style license found in the # LICENSE", "self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize()", "mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two =", "baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one", "step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once() self.task.service_provider.initialize_request_set_cache_service. 
\\ assert_called_once() self.task.service_provider.initalize_ml_services.assert_called_once()", "import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark):", "step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] with mock.patch.object( self.task.service_provider,", "self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider =", "as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two", "inc. # All rights reserved. # # This source code is licensed under", "self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf", ") def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one,", "self.task.run() for step in mock_steps: step.set_df.assert_called_once() step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up()", "def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def", "# Copyright (c) 2020, eQualit.ie inc. # All rights reserved. 
# # This", "'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset'", "BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def", "test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark):", "import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf", "'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf =", "self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask", "self.task.steps = [step_one, step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object(", ") as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf", "code is licensed under the BSD-style license found in the # LICENSE file", "= test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task", "step_two = mock.MagicMock() self.task.steps = [step_one, step_two] with 
mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as", "mock.MagicMock() self.task.steps = [step_one, step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with", "the BSD-style license found in the # LICENSE file in the root directory", "from baskerville.models.pipeline_tasks.tasks_base import Task self.task = Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up()", "mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service:", "All rights reserved. # # This source code is licensed under the BSD-style", ") as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock()", "CacheTask self.task = CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock()", "mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf", "with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def", "step in mock_steps: step.set_df.assert_called_once() 
step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object(", "test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def", "under the BSD-style license found in the # LICENSE file in the root", "self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate()", "with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as", "from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf", "with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() mock_initialize_request_set_cache_service.\\ assert_called_once() step_one.initialize.assert_called_once()", "self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask(", "from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config", "[step_one, step_two] self.task.service_provider = 
mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once() self.task.service_provider.initialize_request_set_cache_service. \\ assert_called_once()", "_helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config, steps ) def", "def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from", "root directory of this source tree. from unittest import mock from baskerville.models.config import", "licensed under the BSD-style license found in the # LICENSE file in the", "self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import Task", "= mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once() self.task.service_provider.initialize_request_set_cache_service. \\ assert_called_once() self.task.service_provider.initalize_ml_services.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once()", "file in the root directory of this source tree. 
from unittest import mock", "SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf", "def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config, steps )", "mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp()", "self.task = Task( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two", "= [step_one, step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider,", "== 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up()", "# # This source code is licensed under the BSD-style license found in", ") as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider, 'initialize_request_set_cache_service' ) as mock_initialize_request_set_cache_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once()", "test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider", "step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up' ) as", "test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one, 
step_two] self._helper_task_set_up(mock_steps) self.task.run()", "with mock.patch.object( self.task.service_provider, 'finish_up' ) as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with", "'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once() mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one =", "tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf class TestTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp()", "= CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two =", "step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() class TestMLTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate()", "import mock from baskerville.models.config import BaskervilleConfig from tests.unit.baskerville_tests.helpers.spark_testing_base import \\ SQLTestCaseLatestSpark from tests.unit.baskerville_tests.helpers.utils", "test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] with", "baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one", "from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up()", "mock_steps: step.set_df.assert_called_once() 
step.set_df.return_value.run.assert_called_once() self.assertTrue(len(self.task.remaining_steps) == 0) def test_finish_up(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'finish_up'", "mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset:", "CacheTask( self.baskerville_config, steps ) def test_initialize(self): self._helper_task_set_up() step_one = mock.MagicMock() step_two = mock.MagicMock()", "mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once()", "= [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\ .initialize_spark_service.assert_called_once() self.task.service_provider.initialize_request_set_cache_service. 
\\", "def test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps = [step_one, step_two] self._helper_task_set_up(mock_steps)", "= mock.MagicMock() step_two = mock.MagicMock() self.task.steps = [step_one, step_two] with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service'", "'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: self.task.initialize() mock_initialize_db_tools_service.assert_called_once()", "self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' ) as mock_reset: self.task.reset() mock_reset.assert_called_once() class TestCacheTask(SQLTestCaseLatestSpark): def", "with mock.patch.object( self.task.service_provider, 'initialize_db_tools_service' ) as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as", "BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import MLTask self.task = MLTask( self.baskerville_config, steps", "TestCacheTask(SQLTestCaseLatestSpark): def setUp(self): super().setUp() self.test_conf = test_baskerville_conf self.baskerville_config = BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()):", "Copyright (c) 2020, eQualit.ie inc. # All rights reserved. # # This source", ") as mock_initialize_db_tools_service: with mock.patch.object( self.task.service_provider, 'initialize_spark_service' ) as mock_initialize_spark_service: with mock.patch.object( self.task.service_provider,", "the root directory of this source tree. 
from unittest import mock from baskerville.models.config", "= BaskervilleConfig(self.test_conf).validate() def _helper_task_set_up(self, steps=()): from baskerville.models.pipeline_tasks.tasks_base import CacheTask self.task = CacheTask( self.baskerville_config,", "= mock.MagicMock() self.task.steps = [step_one, step_two] self.task.service_provider = mock.MagicMock() self.task.initialize() self.task.service_provider.initialize_db_tools_service\\ .assert_called_once() self.task.service_provider\\", ") as mock_finish_up: self.task.finish_up() mock_finish_up.assert_called_once() def test_reset(self): self._helper_task_set_up() with mock.patch.object( self.task.service_provider, 'reset' )", "mock_initialize_spark_service.assert_called_once() step_one.initialize.assert_called_once() step_two.initialize.assert_called_once() def test_run(self): step_one = mock.MagicMock() step_two = mock.MagicMock() mock_steps =" ]
[ "hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head", "81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid tn: 409 - with ntua twitter", "nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in self.embeddings])", "hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in", "2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid", "batch_sentence): embeddings = [embedding(batch_sentence) for embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len", "head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall:", "loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp:", "hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:]", "80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn: 434", "avg_seq_logits + l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze()", "2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid", "avg_seq_logits = avg_seq_logits + l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2,", "accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn: 419 - add Tweet", "= outf1 + embeddings rev_resf1 = 
resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1, hidb1", "in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for", "twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision:", "class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings =", "rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2", "precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid tn:", "BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key,", "80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn: 426", "- with multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid", "device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key", "AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup =", "__init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for", "return output ''' - with stanford twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss:", "- with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1:", 
"319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid", "with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1:", "outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1,", "= resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1", "self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio'])", "= nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding in", "= self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] #", "twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision:", "self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item", "as nn from AbstractModel import AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel): def", "hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding in self.embeddings] embeddings", "reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1,", "85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid tn: 409 - with", "411|valid fp: 102|valid fn: 61|valid tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train", "apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 
81.186|valid precision: 78.458|valid", "388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid", "Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid", "426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid", "2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid", "77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid fn: 81|valid tn: 413", "resb1 = outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed", "attention_weights.append(w) avg_seq_logits = None for l in seq_logits: if avg_seq_logits is None: avg_seq_logits", "ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall:", "419 - with multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1:", "self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1)", "key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in self.embeddings]) self.hidden_size = emb_dim", "emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 =", "for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim,", "attention_weights = [], [] for i in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output)", "None for l in seq_logits: if avg_seq_logits is None: avg_seq_logits = 
l else:", "l else: avg_seq_logits = avg_seq_logits + l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits", "embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1 =", "20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid", "404|valid fp: 119|valid fn: 68|valid tn: 409 - with ntua twitter embedding 2020.07.09", "83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn: 426 - apply ekphrasis 2020.07.09", "= sum([item.embedding_dim for item in self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim,", "1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return output ''' - with stanford twitter embedding", "+ embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1))", "recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn: 426 -", "# not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for i", "fp: 94|valid fn: 74|valid tn: 434 - with stanford twitter embedding 100d 2020.07.09", "-1),:] # not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1", "83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid", "nn from AbstractModel import AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self,", "[] for i in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits", "Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings", "nn.GRU(input_size=emb_dim, 
hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)])", "self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed", "l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l in", "resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 +", "77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid tn: 409", "-1),:] # not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for", "stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid", "= outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2,", "68|valid tn: 409 - with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid", "74|valid tn: 434 - with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss:", "rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights", "tn: 419 - add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid", "hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling =", "not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2 =", "torch import torch.nn as nn from AbstractModel import AbstractModel from Attention import Attention", "f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid fn:", 
"# reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1 =", "range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def", "with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid", "= super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim", "434 - with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss:", "= nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim,", "avg_seq_logits = None for l in seq_logits: if avg_seq_logits is None: avg_seq_logits =", "# reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2 =", "resb2 = outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed", "in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings))", "outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1,", "resf2 = outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed outb2,", "fp: 109|valid fn: 75|valid tn: 419 - add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train", "109|valid fn: 75|valid tn: 419 - add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss:", "fn: 74|valid tn: 419 - with multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid", "outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed 
outb1, hidb1 =", "emb_dim = sum([item.embedding_dim for item in self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim,", "if avg_seq_logits is None: avg_seq_logits = l else: avg_seq_logits = avg_seq_logits + l", "<gh_stars>0 import torch import torch.nn as nn from AbstractModel import AbstractModel from Attention", "outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1,", "recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid fn: 81|valid tn: 413 '''", "self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim,", "dim=2) max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings", "resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [],", "def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding in self.embeddings] embeddings = torch.cat(embeddings,", "device=device) for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in self.embeddings]) self.hidden_size", "496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid", "15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy:", "import torch.nn as nn from AbstractModel import AbstractModel from Attention import Attention class", "rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 =", "self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) 
self.num_head = hyper_params['num_head']", "outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed drop_output =", "multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid", "precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid fn: 81|valid tn:", "99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid", "61|valid tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid", "not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for i in", "f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn:", "tp: 411|valid fp: 102|valid fn: 61|valid tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch:", "= nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in", "with multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision:", "tp: 397|valid fp: 109|valid fn: 75|valid tn: 419 - add Tweet normalizer 2020.07.03", "loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp:", "nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)", "21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid", "self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not", "123.62|valid f1: 79.959|valid precision: 
77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid", "398|valid fp: 109|valid fn: 74|valid tn: 419 - with multi head 2020.07.02 00:32:48|epoch:", "/ self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return output '''", "= resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 =", "fp: 102|valid fn: 61|valid tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss:", "tn: 409 - with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss:", "super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim =", "- add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid", "82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid", "w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l in seq_logits:", "= hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling", "with stanford twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1:", "# not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2", "batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for", "embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 
80.117|valid recall:", "75|valid tn: 419 - add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss:", "sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim", "nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output =", "253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid", "l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output =", "nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding in self.embeddings]", "Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device,", "= emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2", "9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid", "f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn:", "precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn:", "from AbstractModel import AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu',", "None: avg_seq_logits = l else: avg_seq_logits = avg_seq_logits + l avg_seq_logits = avg_seq_logits", "output ''' - with stanford twitter 
embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid", "398|valid fp: 94|valid fn: 74|valid tn: 434 - with stanford twitter embedding 100d", "84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn: 419 - with", "accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn: 426 - apply ekphrasis", "_ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class'])", "resf1 = outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1,", "self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l in seq_logits: if avg_seq_logits", "= outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed drop_output", "reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1,", "= self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for i in range(self.num_head): l, w", "self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _", "hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:]", "self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self,", "100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall:", "f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn:", "-1, -1),:] # reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = 
outb2 + rev_resf2", "loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp:", "avg_seq_logits is None: avg_seq_logits = l else: avg_seq_logits = avg_seq_logits + l avg_seq_logits", "- apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision:", "2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid", "87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn: 426 - apply", "self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim,", "20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy:", "avg_seq_logits = avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits)", "hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:]", "= nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence):", "recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn: 434 -", "tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1:", "range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l", "rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 =", "recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid 
fp: 119|valid fn: 68|valid tn: 409 -", "94|valid fn: 74|valid tn: 434 - with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch:", "in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device)", "= embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1 =", "sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']])", "loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp:", "409 - with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid", "- with stanford twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid", "109|valid fn: 74|valid tn: 419 - with multi head 2020.07.02 00:32:48|epoch: 12|train loss:", "normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall:", "= l else: avg_seq_logits = avg_seq_logits + l avg_seq_logits = avg_seq_logits / self.num_head", "= nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence)", "stanford twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid", "self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in self.embeddings]) self.hidden_size = emb_dim self.f_gru1 =", "else: avg_seq_logits = avg_seq_logits + l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits =", "nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, 
hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)", "14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy:", "seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l in seq_logits: if avg_seq_logits is None:", "precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn:", "def __init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device)", "import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup = super() sup.__init__(device=device, hyper_params=hyper_params)", "reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for i in range(self.num_head):", "84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn: 419 - add", "max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1", "146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid", "140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid", "nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for", "fn: 68|valid tn: 409 - with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss:", "in self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim,", "+ rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # 
reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2))", "for l in seq_logits: if avg_seq_logits is None: avg_seq_logits = l else: avg_seq_logits", "in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in self.embeddings]) self.hidden_size = emb_dim self.f_gru1", "loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp:", "hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2", "for i in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits =", "hyper_params=hyper_params) self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for", "self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1", "drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for i in range(self.num_head): l,", "loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp:", "l in seq_logits: if avg_seq_logits is None: avg_seq_logits = l else: avg_seq_logits =", "batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head =", "output = self.output(pooled_logits) return output ''' - with stanford twitter embedding 200d 2020.07.09", "self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding in self.embeddings] embeddings =", "+ l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits = 
self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output", "= nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in", "= self.output(pooled_logits) return output ''' - with stanford twitter embedding 200d 2020.07.09 18:22:50|epoch:", "seq_logits: if avg_seq_logits is None: avg_seq_logits = l else: avg_seq_logits = avg_seq_logits +", "2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid", "= nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention", "recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn: 419 -", "embeddings = [embedding(batch_sentence) for embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len =", "17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid", "embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1", "78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn: 419", "419 - add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1:", "forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2)", "= None for l in seq_logits: if avg_seq_logits is None: avg_seq_logits = l", "twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid", "pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = 
self.output(pooled_logits) return output ''' - with", "fn: 75|valid tn: 419 - add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid", "f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn:", "i in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None", "83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn: 434 - with stanford twitter", "fn: 74|valid tn: 434 - with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train", "102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid", "-1, -1),:] # not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights = [], []", "outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed outb2, hidb2 =", "recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn: 419 -", "-1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1", "tp: 398|valid fp: 94|valid fn: 74|valid tn: 434 - with stanford twitter embedding", "tp: 404|valid fp: 119|valid fn: 68|valid tn: 409 - with ntua twitter embedding", "outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1,", "119|valid fn: 68|valid tn: 409 - with ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train", "= self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] #", "= self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] #", "self.output(pooled_logits) return output ''' - with stanford twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train", "= resf2[:,torch.arange(max_len-1, -1, -1),:] # 
reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2", "74|valid tn: 419 - with multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss:", "fp: 119|valid fn: 68|valid tn: 409 - with ntua twitter embedding 2020.07.09 14:18:49|epoch:", "1).squeeze() output = self.output(pooled_logits) return output ''' - with stanford twitter embedding 200d", "103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid", "9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid", "torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 +", "avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return output", "hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout", "accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid tn: 409 - with ntua", "import AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup", "rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2", "[], [] for i in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w)", "outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2, hidf2", "accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn: 434 - with stanford", "batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in 
range(self.num_head)]) self.dropout =", "self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return output ''' -", "is None: avg_seq_logits = l else: avg_seq_logits = avg_seq_logits + l avg_seq_logits =", "for key in self.hyper_params['embeddings']]) emb_dim = sum([item.embedding_dim for item in self.embeddings]) self.hidden_size =", "+ rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2, hidf2 =", "= resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed drop_output = self.dropout(rev_resb2) seq_logits, attention_weights =", "batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 =", "self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings =", "= torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1", "fn: 61|valid tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss:", "rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1))", "- with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid", "84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn: 434 - with", "= self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] #", "-1, -1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1", "loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp:", "sum([item.embedding_dim for item in 
self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)", "hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention = nn.ModuleList([Attention(dimensions=emb_dim)", "self.dropout(rev_resb2) seq_logits, attention_weights = [], [] for i in range(self.num_head): l, w =", "context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l in seq_logits: if avg_seq_logits is", "precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn:", "AbstractModel import AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None):", "79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid fn: 81|valid", "= avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return", "ntua twitter embedding 2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision:", "accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn: 419 - with multi", "311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid", "self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed", "tn: 419 - with multi head 2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid", "loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp:", "81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 
74|valid", "= nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim,", "nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.num_head = hyper_params['num_head'] self.attention =", "= nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)]) self.dropout = nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output", "for embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1", "import torch import torch.nn as nn from AbstractModel import AbstractModel from Attention import", "fp: 109|valid fn: 74|valid tn: 419 - with multi head 2020.07.02 00:32:48|epoch: 12|train", "self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)", "200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall:", "from Attention import Attention class BiGruSelfattention(AbstractModel): def __init__(self, device='cpu', hyper_params=None): sup = super()", "02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy:", "resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 +", "+ rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed drop_output = self.dropout(rev_resb2)", "hidb1 = self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:]", "rev_resf2 rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed 
drop_output = self.dropout(rev_resb2) seq_logits,", "self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return output ''' - with stanford twitter", "for item in self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1", "nn.Dropout(hyper_params['dropout_ratio']) self.pooling = nn.AdaptiveAvgPool1d(1) self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings", "tp: 398|valid fp: 109|valid fn: 74|valid tn: 419 - with multi head 2020.07.02", "= self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l) attention_weights.append(w) avg_seq_logits = None for l in seq_logits: if", "self.b_gru1(self.dropout(rev_resf1)) resb1 = outb1 + rev_resf1 rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not", "embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 = outf1 + embeddings rev_resf1 = resf1[:,torch.arange(max_len-1,", "add Tweet normalizer 2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision:", "precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn:", "12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid", "tn: 434 - with stanford twitter embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid", "= outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed outb2, hidb2", "-1),:] # reversed outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2)) resb2 = outb2 + rev_resf2 rev_resb2", "= [], [] for i in range(self.num_head): l, w = self.attention[i](query=drop_output, context=drop_output) seq_logits.append(l)", "81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 
109|valid fn: 75|valid", "embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid", "81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn: 419 - add Tweet normalizer", "loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp:", "[embedding(batch_sentence) for embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1,", "81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn: 419 - with multi head", "f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn:", "loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp:", "-1, -1),:] # not reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 +", "embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1] outf1, hidf1 = self.f_gru1(self.dropout(embeddings)) resf1 =", "torch.nn as nn from AbstractModel import AbstractModel from Attention import Attention class BiGruSelfattention(AbstractModel):", "78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn: 419", "00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy:", "self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru2 = nn.GRU(input_size=emb_dim,", "523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid", "loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp:", "resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed outf2, hidf2 = 
self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2", "= self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze() output = self.output(pooled_logits) return output ''' - with stanford", "seq_logits, attention_weights = [], [] for i in range(self.num_head): l, w = self.attention[i](query=drop_output,", "18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy:", "loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp:", "= [embedding(batch_sentence) for embedding in self.embeddings] embeddings = torch.cat(embeddings, dim=2) max_len = embeddings.shape[1]", "''' - with stanford twitter embedding 200d 2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss:", "81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid", "avg_seq_logits = l else: avg_seq_logits = avg_seq_logits + l avg_seq_logits = avg_seq_logits /", "embedding 100d 2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid", "self.output = nn.Linear(emb_dim, hyper_params['num_class']) self.to(device) def forward(self, batch_sentence): embeddings = [embedding(batch_sentence) for embedding", "reversed outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1)) resf2 = outf2 + rev_resb1 rev_resf2 = resf2[:,torch.arange(max_len-1,", "= avg_seq_logits + l avg_seq_logits = avg_seq_logits / self.num_head pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2,", "2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid", "item in self.embeddings]) self.hidden_size = emb_dim self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True) self.b_gru1 =", "102|valid fn: 61|valid tn: 426 - apply ekphrasis 2020.07.09 02:02:37|epoch: 21|train 
loss: 253.24|valid", "in seq_logits: if avg_seq_logits is None: avg_seq_logits = l else: avg_seq_logits = avg_seq_logits", "397|valid fp: 109|valid fn: 75|valid tn: 419 - add Tweet normalizer 2020.07.03 20:04:02|epoch:" ]
[ "if (x,y) in points and (x,y) not in connected: connected.append((x,y)) else: return self.get_flood(x+1,", "return False wall = TerrainInfo('#', 'wall', (0,0), True, True) floor = TerrainInfo(u'·', 'floor',", "\"\"\"Add object to the level's list of objects\"\"\" if obj in self.objects: return", "self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width = width self.height = height #self.setup()", "self.points from village import VillageGenerator from castle import CastleGenerator from fortress import FortressGenerator", "= None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only be called from obj.move\"\"\"", "x self.y = y def translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\" self.x", "\"\"\"Return whether the tile at p blocks sight\"\"\" for thing in self[p]: if", "npc in [obj for obj in self.objects if isinstance(obj, Villager)]: for i in", "y in xrange(self.height): for x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions =", "self.y if callable(terrain): terrain = terrain(p) if x < 0 or y <", "IndexError): return None def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for all the", "name self.level = level self.points = points self.update() def update(self): \"\"\"Recalculate derivable properties", "dy * yy if p[0] < 0 or p[0] >= self.width or p[1]", "{location: 1} radius = 20 for oct in range(8): self._cast_light( location[0], location[1], 1,", "= -j-1, -j blocked = False while dx <= 0: dx += 1", "# Row is scanned; do next row unless last square was blocked: if", "def __str__(self): return self.name def __contains__(self, p): return p in self.points from village", "is None or thing.block_sight: return True return False def get_fov(self, location): \"\"\"Get the", "= TerrainInfo('#', 'wall', (0,0), True, True) floor = TerrainInfo(u'·', 'floor', (1,0), False, False)", "if not obj.container: return obj.location return 
self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x =", "in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things to", "= y def translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\" self.x += x", "\"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self, char, name, index, block_move,", "self.map[y][x] except (KeyError, IndexError): return None def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator", "up an area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\")", "by our cursor coords - this should only happen during level generation. if", "x, y in points])) def get_regions(self, location): \"\"\"Get regions containing the given location\"\"\"", "if blocked: # we're scanning a row of blocked squares: if self.block_sight(p): new_start", "in points])) def get_regions(self, location): \"\"\"Get regions containing the given location\"\"\" return [region", "return None def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for all the tiles", "and j < radius: # This is a blocking square, start a child", "return True else: return False def get_flood(self, x, y, points, connected): if (x,y)", "not in connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points,", "obj, location): \"\"\"Should only be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location:", "True) floor = TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS = {'#' : wall,", "level generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy() for y in xrange(self.height):", "and initialize map. 
Override with level generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy():", "return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of given object or its container\"\"\"", "x2, y2): \"\"\"Iterator for all the tiles in the given rectangle\"\"\" for y", "'floor', (1,0), False, False) TERRAINS = {'#' : wall, '.' : floor} class", "height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and initialize map. Override with level", "generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge = [f for facts in [obj.get_facts()", "and translate by our cursor\"\"\" if translate: region.points = [(x+self.x, y+self.y) for x,", "region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in points])) def get_regions(self, location):", "is a blocking square, start a child scan: blocked = True self._cast_light(cx, cy,", "Region: def __init__(self, name, level, points): self.name = name self.level = level self.points", "translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\" self.x += x self.y += y", "0, 1, -1, 0, 0, -1]] def __init__(self, world, width, height): self.world =", "the terrain making up an area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\",", "y = p[1] + self.y if callable(terrain): terrain = terrain(p) if x <", "-j blocked = False while dx <= 0: dx += 1 # Translate", "dx, dy coordinates into map coordinates: p = cx + dx * xx", "<filename>src/level/level.py # encoding=utf-8 ### Levels define the terrain making up an area TEST_LEVEL", "-1, 0, 0, -1, 1, 0], [0, 1, 1, 0, 0, -1, -1,", "\"\"\"Get regions containing the given location\"\"\" return [region for region in self.regions if", "function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared = radius*radius for", "translate by our cursor\"\"\" if translate: 
region.points = [(x+self.x, y+self.y) for x, y", "self.block_sight(p): new_start = r_slope continue else: blocked = False start = new_start else:", ": wall, '.' : floor} class Level: mult = [[1, 0, 0, -1,", "location[1]) if location else None def __contains__(self, other): return other in self.objects class", "the square we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope:", "= sum((p[0] for p in self.points))/len(self.points) y = sum((p[1] for p in self.points))/len(self.points)", "level's origin; all terrain-drawing will be translated by this amount\"\"\" self.x = x", "store the slopes of the left and right # extremities of the square", "__contains__(self, p): return p in self.points from village import VillageGenerator from castle import", "<= 0: dx += 1 # Translate the dx, dy coordinates into map", "def __init__(self, world, width, height): self.world = world self.terraintypes = TERRAINS self.objects =", "yy if p[0] < 0 or p[0] >= self.width or p[1] < 0", "# encoding=utf-8 ### Levels define the terrain making up an area TEST_LEVEL =", "< radius: # This is a blocking square, start a child scan: blocked", "[region for region in self.regions if location in region] def get_regions_of(self, obj): \"\"\"Get", "y): \"\"\"Like set_cursor but relative\"\"\" self.x += x self.y += y def add_region(self,", "set of locations that can be seen from the given location\"\"\" light =", "+= 1 # Translate the dx, dy coordinates into map coordinates: p =", "y, self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0], location[1]) if location else None def", "xx + dy * xy, cy + dx * yx + dy *", "p[1] + self.y if callable(terrain): terrain = terrain(p) if x < 0 or", "self.objects if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if not knowledge: break fact", "radius, xx, xy, yx, yy, id, light): \"\"\"Recursive lightcasting function, obtained from 
http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\"", "< 0 or p[0] >= self.width or p[1] < 0 or p[1] >=", "name self.tiletype = 0 self.tileindex = index self.block_move = block_move self.block_sight = block_sight", "knowledge = [f for facts in [obj.get_facts() for obj in self.objects] for f", "there must be exactly one terrain # per tile, or even where it", "not obj.container: return obj.location return self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x = p[0]", "\"\"\"Recursive lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared =", "slopes of the left and right # extremities of the square we're considering:", "get_tile(self, x, y): \"\"\"Return all the stuff at the given location\"\"\" try: return", "in self.objects class Region: def __init__(self, name, level, points): self.name = name self.level", "len(self.points) def __str__(self): return self.name def __contains__(self, p): return p in self.points from", "= width self.height = height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and initialize", "level, points): self.name = name self.level = level self.points = points self.update() def", "= name self.level = level self.points = points self.update() def update(self): \"\"\"Recalculate derivable", "coordinates of given object or its container\"\"\" if not obj.container: return obj.location return", "[] for y in xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width =", "in [obj for obj in self.objects if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)):", "is in the tile's list. 
def block_sight(self, p): \"\"\"Return whether the tile at", "y, points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points, connected) def add_object(self,", "obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only be called", "for obj in self.objects] for f in facts] for npc in [obj for", "return self.get_tile(location[0], location[1]) if location else None def __contains__(self, other): return other in", "\"\"\"Recalculate derivable properties of the region\"\"\" if self.points: x = sum((p[0] for p", "from castle import CastleGenerator from fortress import FortressGenerator from actor import Villager import", "p in self.points))/len(self.points) else: x = None y = None self.centre = (x,", "self.world = world self.terraintypes = TERRAINS self.objects = set() self.map = [] self.regions", "in xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width = width self.height =", "with level generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy() for y in", "= (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope: continue elif end > l_slope: break", "the given location\"\"\" try: return self.map[y][x] except (KeyError, IndexError): return None def get_tiles(self,", "= set() self.map = [] self.regions = [] self.set_cursor(0,0) self.map = [] for", "actor import Villager import random grass = TerrainInfo('v', 'road', (0,1), False, False) class", "xrange(x1, x2): yield (x, y, self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0], location[1]) if", "from fortress import FortressGenerator from actor import Villager import random grass = TerrainInfo('v',", "location in region] def get_regions_of(self, obj): \"\"\"Get regions containing given object or its", "radius: # This is a blocking square, start a child scan: blocked =", "of the square we're considering: l_slope, r_slope = 
(dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start <", "0, light) return light def _cast_light(self, cx, cy, row, start, end, radius, xx,", "define the terrain making up an area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\",", "x < 0 or y < 0 or x >= self.width or y", "self.level = level self.points = points self.update() def update(self): \"\"\"Recalculate derivable properties of", "or p[1] < 0 or p[1] >= self.height: continue # l_slope and r_slope", "connected) self.get_flood(x, y-1, points, connected) def add_object(self, obj): \"\"\"Add object to the level's", "def setup(self): \"\"\"Clear objects and initialize map. Override with level generation.\"\"\" self.set_cursor(0, 0)", "\"\"\"Get the set of locations that can be seen from the given location\"\"\"", "light = {location: 1} radius = 20 for oct in range(8): self._cast_light( location[0],", "l_slope: break else: # Our light beam is touching this square; light it:", "dx**2 + dy**2 if dist_squared < radius_squared: light[p] = dist_squared if blocked: #", "connected): if (x,y) in points and (x,y) not in connected: connected.append((x,y)) else: return", "TerrainInfo('#', 'wall', (0,0), True, True) floor = TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS", "not in self.objects: return obj.location = None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should", "the stuff at the given location\"\"\" try: return self.map[y][x] except (KeyError, IndexError): return", "cursor\"\"\" if translate: region.points = [(x+self.x, y+self.y) for x, y in region.points] self.regions.append(region)", "break def is_connected(self, points): if not points: return False connected = [] self.get_flood(points[0][0],", "key, kwargs[key]) def bumped(self, other): return False wall = TerrainInfo('#', 'wall', (0,0), True,", "should only happen during level generation. 
if obj.location: x, y = obj.location self[(x,y)].append(obj)", "self[p]: if thing is None or thing.block_sight: return True return False def get_fov(self,", "if self.points: x = sum((p[0] for p in self.points))/len(self.points) y = sum((p[1] for", "= world self.terraintypes = TERRAINS self.objects = set() self.map = [] self.regions =", "if callable(terrain): terrain = terrain(p) if x < 0 or y < 0", "in the tile's list. def block_sight(self, p): \"\"\"Return whether the tile at p", "all the tiles in the given rectangle\"\"\" for y in xrange(y1, y2): for", "self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x = p[0] + self.x y = p[1]", "obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared = radius*radius for j", "squares: if self.block_sight(p): new_start = r_slope continue else: blocked = False start =", "grass = TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level): def setup(self): Level.setup(self) self.set_cursor(100,100)", "len(set(points)): return True else: return False def get_flood(self, x, y, points, connected): if", "TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def", "= obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only be", "xrange(width): self.map[-1].append([]) self.width = width self.height = height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear", "= TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS = {'#' : wall, '.' 
:", "self.objects.copy(): obj.destroy() for y in xrange(self.height): for x in xrange(self.width): self.map[x][y] = [floor]", "class TerrainInfo: def __init__(self, char, name, index, block_move, block_sight, **kwargs): self.char = char", "1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light def", "points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points,", "= None y = None self.centre = (x, y) self.area = len(self.points) def", "# extremities of the square we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if", "tile, or even where it is in the tile's list. def block_sight(self, p):", "\"\"\"Should only be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def", "points[0][1], set(points), connected) if len(set(connected)) == len(set(points)): return True else: return False def", "for y in xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width = width", "y in points])) def get_regions(self, location): \"\"\"Get regions containing the given location\"\"\" return", "xy, yx, yy, id, light): \"\"\"Recursive lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start", "Levels define the terrain making up an area TEST_LEVEL = ( \"##########\", \"#....#...#\",", "key in kwargs: setattr(self, key, kwargs[key]) def bumped(self, other): return False wall =", "dx * xx + dy * xy, cy + dx * yx +", "< 0 or p[1] >= self.height: continue # l_slope and r_slope store the", "touching this square; light it: dist_squared = dx**2 + dy**2 if dist_squared <", "we're scanning a row of blocked squares: if self.block_sight(p): new_start = r_slope continue", "blocked = False start = new_start else: if self.block_sight(p) and j < radius:", "if obj not in 
self.objects: return obj.location = None self.objects.remove(obj) def move_object(self, obj,", "location else None def __contains__(self, other): return other in self.objects class Region: def", "for f in facts] for npc in [obj for obj in self.objects if", "callable(terrain): terrain = terrain(p) if x < 0 or y < 0 or", "def update(self): \"\"\"Recalculate derivable properties of the region\"\"\" if self.points: x = sum((p[0]", "in self.points from village import VillageGenerator from castle import CastleGenerator from fortress import", "-1]] def __init__(self, world, width, height): self.world = world self.terraintypes = TERRAINS self.objects", "encoding=utf-8 ### Levels define the terrain making up an area TEST_LEVEL = (", "\"##########\") class TerrainInfo: def __init__(self, char, name, index, block_move, block_sight, **kwargs): self.char =", "thing is None or thing.block_sight: return True return False def get_fov(self, location): \"\"\"Get", "square was blocked: if blocked: break def is_connected(self, points): if not points: return", "for obj in self.objects if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if not", "-1, 0, 0, -1]] def __init__(self, world, width, height): self.world = world self.terraintypes", "\"\"\"Get regions containing given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj):", "connected) def add_object(self, obj): \"\"\"Add object to the level's list of objects\"\"\" if", "self.get_tile(location[0], location[1]) if location else None def __contains__(self, other): return other in self.objects", "unless last square was blocked: if blocked: break def is_connected(self, points): if not", "y-1, points, connected) def add_object(self, obj): \"\"\"Add object to the level's list of", "try: return self.map[y][x] except (KeyError, IndexError): return None def get_tiles(self, x1, y1, x2,", "p in self.points from village import VillageGenerator from castle import 
CastleGenerator from fortress", "TerrainInfo: def __init__(self, char, name, index, block_move, block_sight, **kwargs): self.char = char self.name", "get_regions_of(self, obj): \"\"\"Get regions containing given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def", "__init__(self, name, level, points): self.name = name self.level = level self.points = points", "= cx + dx * xx + dy * xy, cy + dx", "continue else: blocked = False start = new_start else: if self.block_sight(p) and j", "__str__(self): return self.name def __contains__(self, p): return p in self.points from village import", "self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] = [terrain] # TODO: Nothing specifies that", "self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light def _cast_light(self, cx, cy, row,", "self.terraintypes = TERRAINS self.objects = set() self.map = [] self.regions = [] self.set_cursor(0,0)", "= [] def done_setup(self): \"\"\"Things to do after level generation\"\"\" for obj in", "[0, 1, -1, 0, 0, -1, 1, 0], [0, 1, 1, 0, 0,", "0, -1, -1, 0, 0, 1], [0, 1, -1, 0, 0, -1, 1,", "object or its container\"\"\" if not obj.container: return obj.location return self.get_coords_of(obj.container) def set_terrain(self,", "points, connected) def add_object(self, obj): \"\"\"Add object to the level's list of objects\"\"\"", "only be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self,", "< r_slope: continue elif end > l_slope: break else: # Our light beam", "\"\"\"Get coordinates of given object or its container\"\"\" if not obj.container: return obj.location", "\"\"\"Return all the stuff at the given location\"\"\" try: return self.map[y][x] except (KeyError,", "\"\"\"Like set_cursor but relative\"\"\" self.x += x self.y += y def add_region(self, region,", "terrain): x = p[0] + self.x y = p[1] + self.y if callable(terrain):", 
"village import VillageGenerator from castle import CastleGenerator from fortress import FortressGenerator from actor", "self.tileindex = index self.block_move = block_move self.block_sight = block_sight for key in kwargs:", "square we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope: continue", "Villager import random grass = TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level): def", "+ self.y if callable(terrain): terrain = terrain(p) if x < 0 or y", "p[1] < 0 or p[1] >= self.height: continue # l_slope and r_slope store", "y >= self.height: return if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] = [terrain]", "amount\"\"\" self.x = x self.y = y def translate(self, x, y): \"\"\"Like set_cursor", "knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the level's", "containing given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates", "an area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class", "height): self.world = world self.terraintypes = TERRAINS self.objects = set() self.map = []", "to do after level generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge = [f", "given object or its container\"\"\" if not obj.container: return obj.location return self.get_coords_of(obj.container) def", "points and (x,y) not in connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected)", "= ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self,", "coords - this should only happen during level generation. 
if obj.location: x, y", "given location\"\"\" light = set((location,)) light = {location: 1} radius = 20 for", "in range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct],", "-1, 1, 0], [0, 1, 1, 0, 0, -1, -1, 0], [1, 0,", "class Region: def __init__(self, name, level, points): self.name = name self.level = level", "\"\"\"Things to do after level generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge =", "if x < 0 or y < 0 or x >= self.width or", "obj.location return self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x = p[0] + self.x y", "blocks sight\"\"\" for thing in self[p]: if thing is None or thing.block_sight: return", "if start < r_slope: continue elif end > l_slope: break else: # Our", "objects and initialize map. Override with level generation.\"\"\" self.set_cursor(0, 0) for obj in", "oct in range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct],", "y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in", "return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points, connected)", "return if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] = [terrain] # TODO: Nothing", "Villager)]: for i in range(random.randrange(100,101)): if not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact)", "if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if not knowledge: break fact =", "thing.block_sight: return True return False def get_fov(self, location): \"\"\"Get the set of locations", "self.objects: return obj.location = None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only be", "else None def __contains__(self, other): return 
other in self.objects class Region: def __init__(self,", "def set_cursor(self, x, y): \"\"\"Set the level's origin; all terrain-drawing will be translated", "= [] self.set_cursor(0,0) self.map = [] for y in xrange(height): self.map.append([]) for x", "in range(random.randrange(100,101)): if not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x,", "self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0], location[1]) if location else None def __contains__(self,", "= [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things to do after level", "def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for all the tiles in the", "False connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) == len(set(points)): return", "floor} class Level: mult = [[1, 0, 0, -1, -1, 0, 0, 1],", "if not points: return False connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected) if", "+= y def add_region(self, region, translate=True): \"\"\"Add a region and translate by our", "p = cx + dx * xx + dy * xy, cy +", "in self.objects: return obj.location = None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only", "self.regions = [] def done_setup(self): \"\"\"Things to do after level generation\"\"\" for obj", "self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all the stuff at the given location\"\"\"", "our cursor coords - this should only happen during level generation. 
if obj.location:", "return radius_squared = radius*radius for j in range(row, radius+1): dx, dy = -j-1,", "\"\"\"Set the level's origin; all terrain-drawing will be translated by this amount\"\"\" self.x", "0, -1, -1, 0], [1, 0, 0, 1, -1, 0, 0, -1]] def", "obj in self.objects if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if not knowledge:", "or y < 0 or x >= self.width or y >= self.height: return", "even where it is in the tile's list. def block_sight(self, p): \"\"\"Return whether", "npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the level's origin; all terrain-drawing will be", "\"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self, char, name, index, block_move, block_sight, **kwargs):", "from village import VillageGenerator from castle import CastleGenerator from fortress import FortressGenerator from", "yy, id+1, light) new_start = r_slope # Row is scanned; do next row", "{'#' : wall, '.' : floor} class Level: mult = [[1, 0, 0,", "set() self.map = [] self.regions = [] self.set_cursor(0,0) self.map = [] for y", "or even where it is in the tile's list. def block_sight(self, p): \"\"\"Return", "in xrange(self.height): for x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = []", "Override with level generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy() for y", "light def _cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx, yy,", "in connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points, connected)", "in self.objects: obj.level_setup_finished() knowledge = [f for facts in [obj.get_facts() for obj in", "[0, 1, 1, 0, 0, -1, -1, 0], [1, 0, 0, 1, -1,", "per tile, or even where it is in the tile's list. 
def block_sight(self,", "return other in self.objects class Region: def __init__(self, name, level, points): self.name =", "name, level, points): self.name = name self.level = level self.points = points self.update()", "- this should only happen during level generation. if obj.location: x, y =", "-j-1, -j blocked = False while dx <= 0: dx += 1 #", "obj): \"\"\"Add object to the level's list of objects\"\"\" if obj in self.objects:", "(0,1), False, False) class TestLevel(Level): def setup(self): Level.setup(self) self.set_cursor(100,100) #VillageGenerator(self).generate() #CastleGenerator(self).generate() FortressGenerator(self).generate() self.set_cursor(0,0)", "generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy() for y in xrange(self.height): for", "container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of given object or its", "TODO: Nothing specifies that there must be exactly one terrain # per tile,", "of locations that can be seen from the given location\"\"\" light = set((location,))", "self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light)", "setup(self): \"\"\"Clear objects and initialize map. 
Override with level generation.\"\"\" self.set_cursor(0, 0) for", "terrain = terrain(p) if x < 0 or y < 0 or x", "Nothing specifies that there must be exactly one terrain # per tile, or", "if self.block_sight(p) and j < radius: # This is a blocking square, start", "y = None self.centre = (x, y) self.area = len(self.points) def __str__(self): return", "yy, id, light): \"\"\"Recursive lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end:", "False wall = TerrainInfo('#', 'wall', (0,0), True, True) floor = TerrainInfo(u'·', 'floor', (1,0),", "for y in xrange(y1, y2): for x in xrange(x1, x2): yield (x, y,", "for all the tiles in the given rectangle\"\"\" for y in xrange(y1, y2):", "def block_sight(self, p): \"\"\"Return whether the tile at p blocks sight\"\"\" for thing", "world self.terraintypes = TERRAINS self.objects = set() self.map = [] self.regions = []", "= radius*radius for j in range(row, radius+1): dx, dy = -j-1, -j blocked", "self.height: continue # l_slope and r_slope store the slopes of the left and", "\"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self, char, name,", "self.x += x self.y += y def add_region(self, region, translate=True): \"\"\"Add a region", "dist_squared < radius_squared: light[p] = dist_squared if blocked: # we're scanning a row", "in self.objects: return self.objects.add(obj) #Translate by our cursor coords - this should only", "def is_connected(self, points): if not points: return False connected = [] self.get_flood(points[0][0], points[0][1],", "terrain(p) if x < 0 or y < 0 or x >= self.width", "light) new_start = r_slope # Row is scanned; do next row unless last", "y): \"\"\"Return all the stuff at the given location\"\"\" try: return self.map[y][x] except", "len(set(connected)) == len(set(points)): return True else: return False def get_flood(self, x, y, 
points,", "y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only", "containing the given location\"\"\" return [region for region in self.regions if location in", "(x,y) not in connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y,", "[] self.regions = [] self.set_cursor(0,0) self.map = [] for y in xrange(height): self.map.append([])", "self.area = len(self.points) def __str__(self): return self.name def __contains__(self, p): return p in", "yx, yy, id+1, light) new_start = r_slope # Row is scanned; do next", "\"\"\"Clear objects and initialize map. Override with level generation.\"\"\" self.set_cursor(0, 0) for obj", "cx, cy, row, start, end, radius, xx, xy, yx, yy, id, light): \"\"\"Recursive", "blocking square, start a child scan: blocked = True self._cast_light(cx, cy, j+1, start,", "self.mult[2][oct], self.mult[3][oct], 0, light) return light def _cast_light(self, cx, cy, row, start, end,", ">= self.height: continue # l_slope and r_slope store the slopes of the left", "derivable properties of the region\"\"\" if self.points: x = sum((p[0] for p in", "# TODO: Nothing specifies that there must be exactly one terrain # per", "[1, 0, 0, 1, -1, 0, 0, -1]] def __init__(self, world, width, height):", "right # extremities of the square we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5)", "one terrain # per tile, or even where it is in the tile's", "y in xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width = width self.height", "0 or p[0] >= self.width or p[1] < 0 or p[1] >= self.height:", "xy, yx, yy, id+1, light) new_start = r_slope # Row is scanned; do", "yield (x, y, self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0], location[1]) if location else", "obj not in self.objects: return obj.location = None self.objects.remove(obj) def 
move_object(self, obj, location):", "while dx <= 0: dx += 1 # Translate the dx, dy coordinates", "points, connected) self.get_flood(x, y-1, points, connected) def add_object(self, obj): \"\"\"Add object to the", "= (x, y) self.area = len(self.points) def __str__(self): return self.name def __contains__(self, p):", "only be called from obj.destroy()\"\"\" if obj not in self.objects: return obj.location =", "get_flood(self, x, y, points, connected): if (x,y) in points and (x,y) not in", "in xrange(y1, y2): for x in xrange(x1, x2): yield (x, y, self.map[y][x]) def", "< end: return radius_squared = radius*radius for j in range(row, radius+1): dx, dy", "# Translate the dx, dy coordinates into map coordinates: p = cx +", "beam is touching this square; light it: dist_squared = dx**2 + dy**2 if", "in kwargs: setattr(self, key, kwargs[key]) def bumped(self, other): return False wall = TerrainInfo('#',", "by our cursor\"\"\" if translate: region.points = [(x+self.x, y+self.y) for x, y in", "[obj.get_facts() for obj in self.objects] for f in facts] for npc in [obj", "or its container\"\"\" if not obj.container: return obj.location return self.get_coords_of(obj.container) def set_terrain(self, p,", "None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only be called from obj.move\"\"\" if", "self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only be called from obj.move\"\"\" if obj.location:", "Level: mult = [[1, 0, 0, -1, -1, 0, 0, 1], [0, 1,", "[(x+self.x, y+self.y) for x, y in points])) def get_regions(self, location): \"\"\"Get regions containing", "start < r_slope: continue elif end > l_slope: break else: # Our light", ">= self.width or p[1] < 0 or p[1] >= self.height: continue # l_slope", "from the given location\"\"\" light = set((location,)) light = {location: 1} radius =", "None def __contains__(self, other): return other in self.objects class Region: def __init__(self, name,", "x = sum((p[0] for p in 
self.points))/len(self.points) y = sum((p[1] for p in", "x, y): \"\"\"Set the level's origin; all terrain-drawing will be translated by this", "stuff at the given location\"\"\" try: return self.map[y][x] except (KeyError, IndexError): return None", "self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things to do after level generation\"\"\" for", "but relative\"\"\" self.x += x self.y += y def add_region(self, region, translate=True): \"\"\"Add", "at p blocks sight\"\"\" for thing in self[p]: if thing is None or", "objects\"\"\" if obj in self.objects: return self.objects.add(obj) #Translate by our cursor coords -", "= terrain else: self.map[y][x] = [terrain] # TODO: Nothing specifies that there must", ": floor} class Level: mult = [[1, 0, 0, -1, -1, 0, 0,", "x, y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should", "for y in xrange(self.height): for x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions", "be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x,", "= x self.y = y def translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\"", "self.regions if location in region] def get_regions_of(self, obj): \"\"\"Get regions containing given object", "= TERRAINS self.objects = set() self.map = [] self.regions = [] self.set_cursor(0,0) self.map", "for x in xrange(x1, x2): yield (x, y, self.map[y][x]) def __getitem__(self, location): return", "y def translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\" self.x += x self.y", "obj): \"\"\"Get regions containing given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self,", "y+self.y) for x, y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, 
self, [(x+self.x, y+self.y) for", "this square; light it: dist_squared = dx**2 + dy**2 if dist_squared < radius_squared:", "region] def get_regions_of(self, obj): \"\"\"Get regions containing given object or its container\"\"\" return", "random grass = TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level): def setup(self): Level.setup(self)", "in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in points]))", "0, -1, 1, 0], [0, 1, 1, 0, 0, -1, -1, 0], [1,", "[terrain] # TODO: Nothing specifies that there must be exactly one terrain #", "in facts] for npc in [obj for obj in self.objects if isinstance(obj, Villager)]:", "= [(x+self.x, y+self.y) for x, y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x,", "1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light def _cast_light(self,", "self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light def _cast_light(self, cx, cy, row, start,", "points, connected): if (x,y) in points and (x,y) not in connected: connected.append((x,y)) else:", "x1, y1, x2, y2): \"\"\"Iterator for all the tiles in the given rectangle\"\"\"", "id+1, light) new_start = r_slope # Row is scanned; do next row unless", "def get_tile(self, x, y): \"\"\"Return all the stuff at the given location\"\"\" try:", "if location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all the stuff at the", "xrange(y1, y2): for x in xrange(x1, x2): yield (x, y, self.map[y][x]) def __getitem__(self,", "j+1, start, l_slope, radius, xx, xy, yx, yy, id+1, light) new_start = r_slope", "relative\"\"\" self.x += x self.y += y def add_region(self, region, translate=True): \"\"\"Add a", "= p[1] + self.y if callable(terrain): terrain = terrain(p) if x < 0", "return [region for region in self.regions if location in region] 
def get_regions_of(self, obj):", "and (x,y) not in connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1,", "self.get_flood(x, y-1, points, connected) def add_object(self, obj): \"\"\"Add object to the level's list", "self.width = width self.height = height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and", "char, name, index, block_move, block_sight, **kwargs): self.char = char self.name = name self.tiletype", "TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS = {'#' : wall, '.' : floor}", "self.height = height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and initialize map. Override", "of the left and right # extremities of the square we're considering: l_slope,", "= dx**2 + dy**2 if dist_squared < radius_squared: light[p] = dist_squared if blocked:", "and r_slope store the slopes of the left and right # extremities of", "def get_coords_of(self, obj): \"\"\"Get coordinates of given object or its container\"\"\" if not", "if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all the", "This is a blocking square, start a child scan: blocked = True self._cast_light(cx,", "start = new_start else: if self.block_sight(p) and j < radius: # This is", "**kwargs): self.char = char self.name = name self.tiletype = 0 self.tileindex = index", "__init__(self, world, width, height): self.world = world self.terraintypes = TERRAINS self.objects = set()", "__init__(self, char, name, index, block_move, block_sight, **kwargs): self.char = char self.name = name", "+ dx * xx + dy * xy, cy + dx * yx", "= r_slope # Row is scanned; do next row unless last square was", "p blocks sight\"\"\" for thing in self[p]: if thing is None or thing.block_sight:", "= p[0] + self.x y = p[1] + self.y if callable(terrain): terrain =", "0, 1], [0, 1, -1, 0, 0, -1, 1, 0], [0, 1, 1,", "radius*radius for j in range(row, 
radius+1): dx, dy = -j-1, -j blocked =", "a blocking square, start a child scan: blocked = True self._cast_light(cx, cy, j+1,", "the left and right # extremities of the square we're considering: l_slope, r_slope", "was blocked: if blocked: break def is_connected(self, points): if not points: return False", "0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light def _cast_light(self, cx,", "True return False def get_fov(self, location): \"\"\"Get the set of locations that can", "0], [1, 0, 0, 1, -1, 0, 0, -1]] def __init__(self, world, width,", "def remove_object(self, obj): \"\"\"Should only be called from obj.destroy()\"\"\" if obj not in", "y = sum((p[1] for p in self.points))/len(self.points) else: x = None y =", "origin; all terrain-drawing will be translated by this amount\"\"\" self.x = x self.y", "dx, dy = -j-1, -j blocked = False while dx <= 0: dx", "light): \"\"\"Recursive lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared", "in self.points))/len(self.points) else: x = None y = None self.centre = (x, y)", "whether the tile at p blocks sight\"\"\" for thing in self[p]: if thing", "add_region(self, region, translate=True): \"\"\"Add a region and translate by our cursor\"\"\" if translate:", "the tile at p blocks sight\"\"\" for thing in self[p]: if thing is", "y def add_region(self, region, translate=True): \"\"\"Add a region and translate by our cursor\"\"\"", "all terrain-drawing will be translated by this amount\"\"\" self.x = x self.y =", "self.map = [] self.regions = [] self.set_cursor(0,0) self.map = [] for y in", "in self[p]: if thing is None or thing.block_sight: return True return False def", "self.char = char self.name = name self.tiletype = 0 self.tileindex = index self.block_move", "#self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and initialize map. 
Override with level generation.\"\"\"", "= sum((p[1] for p in self.points))/len(self.points) else: x = None y = None", "connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x,", "called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x, y):", ">= self.width or y >= self.height: return if self.map[y][x]: self.map[y][x][0] = terrain else:", "of objects\"\"\" if obj in self.objects: return self.objects.add(obj) #Translate by our cursor coords", "'road', (0,1), False, False) class TestLevel(Level): def setup(self): Level.setup(self) self.set_cursor(100,100) #VillageGenerator(self).generate() #CastleGenerator(self).generate() FortressGenerator(self).generate()", "0, 0, 1, -1, 0, 0, -1]] def __init__(self, world, width, height): self.world", "self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy() for y in xrange(self.height): for x", "at the given location\"\"\" try: return self.map[y][x] except (KeyError, IndexError): return None def", "update(self): \"\"\"Recalculate derivable properties of the region\"\"\" if self.points: x = sum((p[0] for", "self.points))/len(self.points) y = sum((p[1] for p in self.points))/len(self.points) else: x = None y", "for facts in [obj.get_facts() for obj in self.objects] for f in facts] for", "is touching this square; light it: dist_squared = dx**2 + dy**2 if dist_squared", "self.set_cursor(0,0) self.map = [] for y in xrange(height): self.map.append([]) for x in xrange(width):", "'wall', (0,0), True, True) floor = TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS =", "\"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self, char, name, index, block_move, block_sight,", "the set of locations that can be seen from the given location\"\"\" light", "(x,y) in points and (x,y) not in connected: 
connected.append((x,y)) else: return self.get_flood(x+1, y,", "from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return", "TERRAINS self.objects = set() self.map = [] self.regions = [] self.set_cursor(0,0) self.map =", "obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all the stuff", "if dist_squared < radius_squared: light[p] = dist_squared if blocked: # we're scanning a", "block_sight for key in kwargs: setattr(self, key, kwargs[key]) def bumped(self, other): return False", "the given location\"\"\" return [region for region in self.regions if location in region]", "it: dist_squared = dx**2 + dy**2 if dist_squared < radius_squared: light[p] = dist_squared", "xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width = width self.height = height", "given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of", "points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points, connected) def add_object(self, obj):", "x2): yield (x, y, self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0], location[1]) if location", "= points self.update() def update(self): \"\"\"Recalculate derivable properties of the region\"\"\" if self.points:", "location\"\"\" light = set((location,)) light = {location: 1} radius = 20 for oct", "initialize map. Override with level generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy()", "the given rectangle\"\"\" for y in xrange(y1, y2): for x in xrange(x1, x2):", "def __contains__(self, other): return other in self.objects class Region: def __init__(self, name, level,", "# This is a blocking square, start a child scan: blocked = True", "map. 
Override with level generation.\"\"\" self.set_cursor(0, 0) for obj in self.objects.copy(): obj.destroy() for", "obj): \"\"\"Should only be called from obj.destroy()\"\"\" if obj not in self.objects: return", "light beam is touching this square; light it: dist_squared = dx**2 + dy**2", "in region] def get_regions_of(self, obj): \"\"\"Get regions containing given object or its container\"\"\"", "do next row unless last square was blocked: if blocked: break def is_connected(self,", "char self.name = name self.tiletype = 0 self.tileindex = index self.block_move = block_move", "a region and translate by our cursor\"\"\" if translate: region.points = [(x+self.x, y+self.y)", "y < 0 or x >= self.width or y >= self.height: return if", "during level generation. if obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x,", "level self.points = points self.update() def update(self): \"\"\"Recalculate derivable properties of the region\"\"\"", "get_regions(self, location): \"\"\"Get regions containing the given location\"\"\" return [region for region in", "self.map[-1].append([]) self.width = width self.height = height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects", "if obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self,", "set_terrain(self, p, terrain): x = p[0] + self.x y = p[1] + self.y", "False def get_fov(self, location): \"\"\"Get the set of locations that can be seen", "= True self._cast_light(cx, cy, j+1, start, l_slope, radius, xx, xy, yx, yy, id+1,", "self, [(x+self.x, y+self.y) for x, y in points])) def get_regions(self, location): \"\"\"Get regions", "location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light", "get_coords_of(self, obj): \"\"\"Get coordinates of given object or its container\"\"\" if not obj.container:", "in range(row, radius+1): dx, dy = -j-1, -j blocked = 
False while dx", "__contains__(self, other): return other in self.objects class Region: def __init__(self, name, level, points):", "for thing in self[p]: if thing is None or thing.block_sight: return True return", "self.points: x = sum((p[0] for p in self.points))/len(self.points) y = sum((p[1] for p", "import random grass = TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level): def setup(self):", "new_start else: if self.block_sight(p) and j < radius: # This is a blocking", "True else: return False def get_flood(self, x, y, points, connected): if (x,y) in", "area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo:", "list of objects\"\"\" if obj in self.objects: return self.objects.add(obj) #Translate by our cursor", "self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points, connected) def", "self.width or y >= self.height: return if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x]", "must be exactly one terrain # per tile, or even where it is", "xx, xy, yx, yy, id+1, light) new_start = r_slope # Row is scanned;", "thing in self[p]: if thing is None or thing.block_sight: return True return False", "if len(set(connected)) == len(set(points)): return True else: return False def get_flood(self, x, y,", "location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all the stuff at the given", "for p in self.points))/len(self.points) else: x = None y = None self.centre =", "y1, x2, y2): \"\"\"Iterator for all the tiles in the given rectangle\"\"\" for", "-1, 0], [1, 0, 0, 1, -1, 0, 0, -1]] def __init__(self, world,", "block_sight(self, p): \"\"\"Return whether the tile at p blocks sight\"\"\" for thing in", "0 or x >= self.width or y >= self.height: return if self.map[y][x]: self.map[y][x][0]", "sight\"\"\" for thing in self[p]: if thing is None or thing.block_sight: return 
True", "self.y += y def add_region(self, region, translate=True): \"\"\"Add a region and translate by", "( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self, char,", "in xrange(width): self.map[-1].append([]) self.width = width self.height = height #self.setup() #self.done_setup() def setup(self):", "set_cursor but relative\"\"\" self.x += x self.y += y def add_region(self, region, translate=True):", "and right # extremities of the square we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5),", "[(x+self.x, y+self.y) for x, y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y)", "self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points, connected) def add_object(self, obj): \"\"\"Add object", "radius_squared: light[p] = dist_squared if blocked: # we're scanning a row of blocked", "obj in self.objects] for f in facts] for npc in [obj for obj", "range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0,", "= 20 for oct in range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius,", "y, points, connected): if (x,y) in points and (x,y) not in connected: connected.append((x,y))", "all the stuff at the given location\"\"\" try: return self.map[y][x] except (KeyError, IndexError):", "if thing is None or thing.block_sight: return True return False def get_fov(self, location):", "self.block_sight = block_sight for key in kwargs: setattr(self, key, kwargs[key]) def bumped(self, other):", "regions containing the given location\"\"\" return [region for region in self.regions if location", "obj in self.objects: return self.objects.add(obj) #Translate by our cursor coords - this should", "< 0 or y < 0 or x >= self.width or y >=", "other in self.objects class Region: def __init__(self, 
name, level, points): self.name = name", "None y = None self.centre = (x, y) self.area = len(self.points) def __str__(self):", "self.mult[3][oct], 0, light) return light def _cast_light(self, cx, cy, row, start, end, radius,", "obj in self.objects.copy(): obj.destroy() for y in xrange(self.height): for x in xrange(self.width): self.map[x][y]", "floor = TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS = {'#' : wall, '.'", "blocked: if blocked: break def is_connected(self, points): if not points: return False connected", "level generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge = [f for facts in", "connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points, connected)", "TERRAINS = {'#' : wall, '.' : floor} class Level: mult = [[1,", "< 0 or x >= self.width or y >= self.height: return if self.map[y][x]:", "self.objects = set() self.map = [] self.regions = [] self.set_cursor(0,0) self.map = []", "return obj.location = None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only be called", "in self.objects] for f in facts] for npc in [obj for obj in", "range(row, radius+1): dx, dy = -j-1, -j blocked = False while dx <=", "\"\"\"Add a region and translate by our cursor\"\"\" if translate: region.points = [(x+self.x,", "def get_regions(self, location): \"\"\"Get regions containing the given location\"\"\" return [region for region", "\"#........#\", \"##########\") class TerrainInfo: def __init__(self, char, name, index, block_move, block_sight, **kwargs): self.char", "y2): for x in xrange(x1, x2): yield (x, y, self.map[y][x]) def __getitem__(self, location):", "in self.objects if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if not knowledge: break", "can be seen from the given location\"\"\" light = set((location,)) light = {location:", "considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < 
r_slope: continue elif end", "else: # Our light beam is touching this square; light it: dist_squared =", "done_setup(self): \"\"\"Things to do after level generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge", "yx + dy * yy if p[0] < 0 or p[0] >= self.width", "for x in xrange(width): self.map[-1].append([]) self.width = width self.height = height #self.setup() #self.done_setup()", "self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only be called from", "= [f for facts in [obj.get_facts() for obj in self.objects] for f in", "location\"\"\" return [region for region in self.regions if location in region] def get_regions_of(self,", "y+1, points, connected) self.get_flood(x, y-1, points, connected) def add_object(self, obj): \"\"\"Add object to", "location): \"\"\"Should only be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj)", "from obj.destroy()\"\"\" if obj not in self.objects: return obj.location = None self.objects.remove(obj) def", "set_cursor(self, x, y): \"\"\"Set the level's origin; all terrain-drawing will be translated by", "terrain # per tile, or even where it is in the tile's list.", "return self.objects.add(obj) #Translate by our cursor coords - this should only happen during", "* xx + dy * xy, cy + dx * yx + dy", "self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in points])) def get_regions(self,", "get_fov(self, location): \"\"\"Get the set of locations that can be seen from the", "set((location,)) light = {location: 1} radius = 20 for oct in range(8): self._cast_light(", "fortress import FortressGenerator from actor import Villager import random grass = TerrainInfo('v', 'road',", "def translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\" self.x += x self.y +=", "given location\"\"\" return [region for region in self.regions if 
location in region] def", "(dx+0.5)/(dy-0.5) if start < r_slope: continue elif end > l_slope: break else: #", "obj.location = (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only be called from obj.destroy()\"\"\"", "f in facts] for npc in [obj for obj in self.objects if isinstance(obj,", "self.objects class Region: def __init__(self, name, level, points): self.name = name self.level =", "1, -1, 0, 0, -1, 1, 0], [0, 1, 1, 0, 0, -1,", "= dist_squared if blocked: # we're scanning a row of blocked squares: if", "for j in range(row, radius+1): dx, dy = -j-1, -j blocked = False", "import CastleGenerator from fortress import FortressGenerator from actor import Villager import random grass", "p[1] >= self.height: continue # l_slope and r_slope store the slopes of the", "[obj for obj in self.objects if isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if", "# we're scanning a row of blocked squares: if self.block_sight(p): new_start = r_slope", "locations that can be seen from the given location\"\"\" light = set((location,)) light", "add_object(self, obj): \"\"\"Add object to the level's list of objects\"\"\" if obj in", "location): \"\"\"Get the set of locations that can be seen from the given", "self.block_move = block_move self.block_sight = block_sight for key in kwargs: setattr(self, key, kwargs[key])", "0 or y < 0 or x >= self.width or y >= self.height:", "x = p[0] + self.x y = p[1] + self.y if callable(terrain): terrain", "self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things to do after", "coordinates: p = cx + dx * xx + dy * xy, cy", "connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) == len(set(points)): return True", "obj.location = None self.objects.remove(obj) def move_object(self, obj, location): \"\"\"Should only be called from", "y) self.area = len(self.points) def __str__(self): 
return self.name def __contains__(self, p): return p", "[] self.set_cursor(0,0) self.map = [] for y in xrange(height): self.map.append([]) for x in", "in [obj.get_facts() for obj in self.objects] for f in facts] for npc in", "the given location\"\"\" light = set((location,)) light = {location: 1} radius = 20", "= False start = new_start else: if self.block_sight(p) and j < radius: #", "cy, j+1, start, l_slope, radius, xx, xy, yx, yy, id+1, light) new_start =", "square, start a child scan: blocked = True self._cast_light(cx, cy, j+1, start, l_slope,", "terrain making up an area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\",", "self.height: return if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] = [terrain] # TODO:", "self.objects] for f in facts] for npc in [obj for obj in self.objects", "return light def _cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx,", "False) TERRAINS = {'#' : wall, '.' : floor} class Level: mult =", "region and translate by our cursor\"\"\" if translate: region.points = [(x+self.x, y+self.y) for", "x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things", "obj.destroy()\"\"\" if obj not in self.objects: return obj.location = None self.objects.remove(obj) def move_object(self,", "def add_object(self, obj): \"\"\"Add object to the level's list of objects\"\"\" if obj", "#self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in points])) def get_regions(self, location): \"\"\"Get", "block_move, block_sight, **kwargs): self.char = char self.name = name self.tiletype = 0 self.tileindex", "+= x self.y += y def add_region(self, region, translate=True): \"\"\"Add a region and", "r_slope store the slopes of the left and right # extremities of the", "self._cast_light(cx, cy, j+1, start, l_slope, radius, xx, xy, yx, yy, id+1, light) new_start", "for i in 
range(random.randrange(100,101)): if not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def", "0], [0, 1, 1, 0, 0, -1, -1, 0], [1, 0, 0, 1,", "p): \"\"\"Return whether the tile at p blocks sight\"\"\" for thing in self[p]:", "making up an area TEST_LEVEL = ( \"##########\", \"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\",", "tiles in the given rectangle\"\"\" for y in xrange(y1, y2): for x in", "tile at p blocks sight\"\"\" for thing in self[p]: if thing is None", "move_object(self, obj, location): \"\"\"Should only be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if", "points): self.name = name self.level = level self.points = points self.update() def update(self):", "\"#....#...#\", \"#....#...#\", \"#.####...#\", \"#........#\", \"#........#\", \"##########\") class TerrainInfo: def __init__(self, char, name, index,", "setattr(self, key, kwargs[key]) def bumped(self, other): return False wall = TerrainInfo('#', 'wall', (0,0),", "dx += 1 # Translate the dx, dy coordinates into map coordinates: p", "or p[1] >= self.height: continue # l_slope and r_slope store the slopes of", "\"\"\"Should only be called from obj.destroy()\"\"\" if obj not in self.objects: return obj.location", "p[0] + self.x y = p[1] + self.y if callable(terrain): terrain = terrain(p)", "self.centre = (x, y) self.area = len(self.points) def __str__(self): return self.name def __contains__(self,", "import Villager import random grass = TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level):", "FortressGenerator from actor import Villager import random grass = TerrainInfo('v', 'road', (0,1), False,", "http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared = radius*radius for j in range(row,", "points): if not points: return False connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected)", "x, y in region.points] 
self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y", "1], [0, 1, -1, 0, 0, -1, 1, 0], [0, 1, 1, 0,", "blocked: # we're scanning a row of blocked squares: if self.block_sight(p): new_start =", "[] def done_setup(self): \"\"\"Things to do after level generation\"\"\" for obj in self.objects:", "x, y): \"\"\"Return all the stuff at the given location\"\"\" try: return self.map[y][x]", "def move_object(self, obj, location): \"\"\"Should only be called from obj.move\"\"\" if obj.location: self[obj.location].remove(obj)", "[floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things to do after level generation\"\"\"", "continue # l_slope and r_slope store the slopes of the left and right", "coordinates into map coordinates: p = cx + dx * xx + dy", "def get_flood(self, x, y, points, connected): if (x,y) in points and (x,y) not", "we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope: continue elif", "> l_slope: break else: # Our light beam is touching this square; light", "= [] self.regions = [] self.set_cursor(0,0) self.map = [] for y in xrange(height):", "remove_object(self, obj): \"\"\"Should only be called from obj.destroy()\"\"\" if obj not in self.objects:", "connected.append((x,y)) else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1,", "in the given rectangle\"\"\" for y in xrange(y1, y2): for x in xrange(x1,", "None self.centre = (x, y) self.area = len(self.points) def __str__(self): return self.name def", "CastleGenerator from fortress import FortressGenerator from actor import Villager import random grass =", "cy, row, start, end, radius, xx, xy, yx, yy, id, light): \"\"\"Recursive lightcasting", "radius+1): dx, dy = -j-1, -j blocked = False while dx <= 0:", "(x, y, self.map[y][x]) def __getitem__(self, location): return 
self.get_tile(location[0], location[1]) if location else None", "the level's origin; all terrain-drawing will be translated by this amount\"\"\" self.x =", "def __init__(self, name, level, points): self.name = name self.level = level self.points =", "or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of given object", "y+self.y) def remove_object(self, obj): \"\"\"Should only be called from obj.destroy()\"\"\" if obj not", "new_start = r_slope # Row is scanned; do next row unless last square", "cursor coords - this should only happen during level generation. if obj.location: x,", "end, radius, xx, xy, yx, yy, id, light): \"\"\"Recursive lightcasting function, obtained from", "scanned; do next row unless last square was blocked: if blocked: break def", "else: self.map[y][x] = [terrain] # TODO: Nothing specifies that there must be exactly", "of blocked squares: if self.block_sight(p): new_start = r_slope continue else: blocked = False", "dy**2 if dist_squared < radius_squared: light[p] = dist_squared if blocked: # we're scanning", "1 # Translate the dx, dy coordinates into map coordinates: p = cx", "after level generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge = [f for facts", "+ self.x y = p[1] + self.y if callable(terrain): terrain = terrain(p) if", "j in range(row, radius+1): dx, dy = -j-1, -j blocked = False while", "TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level): def setup(self): Level.setup(self) self.set_cursor(100,100) #VillageGenerator(self).generate() #CastleGenerator(self).generate()", "Row is scanned; do next row unless last square was blocked: if blocked:", "[] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) == len(set(points)): return True else: return", "return obj.location return self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x = p[0] + self.x", "connected) if 
len(set(connected)) == len(set(points)): return True else: return False def get_flood(self, x,", "in xrange(x1, x2): yield (x, y, self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0], location[1])", "(1,0), False, False) TERRAINS = {'#' : wall, '.' : floor} class Level:", "region, translate=True): \"\"\"Add a region and translate by our cursor\"\"\" if translate: region.points", "of the region\"\"\" if self.points: x = sum((p[0] for p in self.points))/len(self.points) y", "start, end, radius, xx, xy, yx, yy, id, light): \"\"\"Recursive lightcasting function, obtained", "False while dx <= 0: dx += 1 # Translate the dx, dy", "return p in self.points from village import VillageGenerator from castle import CastleGenerator from", "for npc in [obj for obj in self.objects if isinstance(obj, Villager)]: for i", "called from obj.destroy()\"\"\" if obj not in self.objects: return obj.location = None self.objects.remove(obj)", "square; light it: dist_squared = dx**2 + dy**2 if dist_squared < radius_squared: light[p]", "(KeyError, IndexError): return None def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for all", "def add_region(self, region, translate=True): \"\"\"Add a region and translate by our cursor\"\"\" if", "for x, y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x,", "from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared = radius*radius for j in", "if not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set", "self.x y = p[1] + self.y if callable(terrain): terrain = terrain(p) if x", "self.map[y][x][0] = terrain else: self.map[y][x] = [terrain] # TODO: Nothing specifies that there", "(0,0), True, True) floor = TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS = {'#'", "in self.regions if location in region] def 
get_regions_of(self, obj): \"\"\"Get regions containing given", "'.' : floor} class Level: mult = [[1, 0, 0, -1, -1, 0,", "* yx + dy * yy if p[0] < 0 or p[0] >=", "Our light beam is touching this square; light it: dist_squared = dx**2 +", "will be translated by this amount\"\"\" self.x = x self.y = y def", "child scan: blocked = True self._cast_light(cx, cy, j+1, start, l_slope, radius, xx, xy,", "1, 1, 0, 0, -1, -1, 0], [1, 0, 0, 1, -1, 0,", "location): return self.get_tile(location[0], location[1]) if location else None def __contains__(self, other): return other", "x, y): \"\"\"Like set_cursor but relative\"\"\" self.x += x self.y += y def", "blocked = False while dx <= 0: dx += 1 # Translate the", "continue elif end > l_slope: break else: # Our light beam is touching", "index, block_move, block_sight, **kwargs): self.char = char self.name = name self.tiletype = 0", "break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the level's origin;", "be called from obj.destroy()\"\"\" if obj not in self.objects: return obj.location = None", "else: if self.block_sight(p) and j < radius: # This is a blocking square,", "p[0] >= self.width or p[1] < 0 or p[1] >= self.height: continue #", "False start = new_start else: if self.block_sight(p) and j < radius: # This", "return False def get_fov(self, location): \"\"\"Get the set of locations that can be", "dx <= 0: dx += 1 # Translate the dx, dy coordinates into", "block_move self.block_sight = block_sight for key in kwargs: setattr(self, key, kwargs[key]) def bumped(self,", "#self.done_setup() def setup(self): \"\"\"Clear objects and initialize map. 
Override with level generation.\"\"\" self.set_cursor(0,", "def __getitem__(self, location): return self.get_tile(location[0], location[1]) if location else None def __contains__(self, other):", "[f for facts in [obj.get_facts() for obj in self.objects] for f in facts]", "region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in points])) def", "it is in the tile's list. def block_sight(self, p): \"\"\"Return whether the tile", "obj.destroy() for y in xrange(self.height): for x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player)", "radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return light def _cast_light(self, cx, cy,", "return False connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) == len(set(points)):", "in self.points))/len(self.points) y = sum((p[1] for p in self.points))/len(self.points) else: x = None", "self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x,", "be seen from the given location\"\"\" light = set((location,)) light = {location: 1}", "return True return False def get_fov(self, location): \"\"\"Get the set of locations that", "class Level: mult = [[1, 0, 0, -1, -1, 0, 0, 1], [0,", "from actor import Villager import random grass = TerrainInfo('v', 'road', (0,1), False, False)", "p): return p in self.points from village import VillageGenerator from castle import CastleGenerator", "self.points))/len(self.points) else: x = None y = None self.centre = (x, y) self.area", "= name self.tiletype = 0 self.tileindex = index self.block_move = block_move self.block_sight =", "None or thing.block_sight: return True return False def get_fov(self, location): \"\"\"Get the set", "= index self.block_move = block_move self.block_sight = block_sight 
for key in kwargs: setattr(self,", "+ dy * yy if p[0] < 0 or p[0] >= self.width or", "r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope: continue elif end > l_slope:", "y in xrange(y1, y2): for x in xrange(x1, x2): yield (x, y, self.map[y][x])", "= block_sight for key in kwargs: setattr(self, key, kwargs[key]) def bumped(self, other): return", "= {location: 1} radius = 20 for oct in range(8): self._cast_light( location[0], location[1],", "break else: # Our light beam is touching this square; light it: dist_squared", "import FortressGenerator from actor import Villager import random grass = TerrainInfo('v', 'road', (0,1),", "object to the level's list of objects\"\"\" if obj in self.objects: return self.objects.add(obj)", "that there must be exactly one terrain # per tile, or even where", "= (x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only be called from obj.destroy()\"\"\" if", "0 or p[1] >= self.height: continue # l_slope and r_slope store the slopes", "if translate: region.points = [(x+self.x, y+self.y) for x, y in region.points] self.regions.append(region) region.update()", "def __contains__(self, p): return p in self.points from village import VillageGenerator from castle", "20 for oct in range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct],", "blocked: break def is_connected(self, points): if not points: return False connected = []", "self.name = name self.tiletype = 0 self.tileindex = index self.block_move = block_move self.block_sight", "facts] for npc in [obj for obj in self.objects if isinstance(obj, Villager)]: for", "import VillageGenerator from castle import CastleGenerator from fortress import FortressGenerator from actor import", "obj.level_setup_finished() knowledge = [f for facts in [obj.get_facts() for obj in self.objects] for", "# per tile, or even where it is in the tile's list. 
def", "def __init__(self, char, name, index, block_move, block_sight, **kwargs): self.char = char self.name =", "self.objects.add(obj) #Translate by our cursor coords - this should only happen during level", "= 0 self.tileindex = index self.block_move = block_move self.block_sight = block_sight for key", "+ dy**2 if dist_squared < radius_squared: light[p] = dist_squared if blocked: # we're", "isinstance(obj, Villager)]: for i in range(random.randrange(100,101)): if not knowledge: break fact = random.choice(knowledge)", "self.points = points self.update() def update(self): \"\"\"Recalculate derivable properties of the region\"\"\" if", "y): \"\"\"Set the level's origin; all terrain-drawing will be translated by this amount\"\"\"", "list. def block_sight(self, p): \"\"\"Return whether the tile at p blocks sight\"\"\" for", "new_start = r_slope continue else: blocked = False start = new_start else: if", "if blocked: break def is_connected(self, points): if not points: return False connected =", "generation. 
if obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def", "l_slope, radius, xx, xy, yx, yy, id+1, light) new_start = r_slope # Row", "this amount\"\"\" self.x = x self.y = y def translate(self, x, y): \"\"\"Like", "for region in self.regions if location in region] def get_regions_of(self, obj): \"\"\"Get regions", "properties of the region\"\"\" if self.points: x = sum((p[0] for p in self.points))/len(self.points)", "self.x = x self.y = y def translate(self, x, y): \"\"\"Like set_cursor but", "radius = 20 for oct in range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0,", "blocked = True self._cast_light(cx, cy, j+1, start, l_slope, radius, xx, xy, yx, yy,", "self.y = y def translate(self, x, y): \"\"\"Like set_cursor but relative\"\"\" self.x +=", "row of blocked squares: if self.block_sight(p): new_start = r_slope continue else: blocked =", "1} radius = 20 for oct in range(8): self._cast_light( location[0], location[1], 1, 1.0,", "obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y) def remove_object(self, obj):", "given location\"\"\" try: return self.map[y][x] except (KeyError, IndexError): return None def get_tiles(self, x1,", "False, False) TERRAINS = {'#' : wall, '.' : floor} class Level: mult", "to the level's list of objects\"\"\" if obj in self.objects: return self.objects.add(obj) #Translate", "random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the level's origin; all terrain-drawing will", "where it is in the tile's list. 
def block_sight(self, p): \"\"\"Return whether the", "r_slope continue else: blocked = False start = new_start else: if self.block_sight(p) and", "0, 0, -1, -1, 0, 0, 1], [0, 1, -1, 0, 0, -1,", "xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self): \"\"\"Things to do", "or y >= self.height: return if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] =", "of given object or its container\"\"\" if not obj.container: return obj.location return self.get_coords_of(obj.container)", "radius, xx, xy, yx, yy, id+1, light) new_start = r_slope # Row is", "or x >= self.width or y >= self.height: return if self.map[y][x]: self.map[y][x][0] =", "only happen during level generation. if obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location", "our cursor\"\"\" if translate: region.points = [(x+self.x, y+self.y) for x, y in region.points]", "dy * xy, cy + dx * yx + dy * yy if", "x self.y += y def add_region(self, region, translate=True): \"\"\"Add a region and translate", "obj.container: return obj.location return self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x = p[0] +", "the level's list of objects\"\"\" if obj in self.objects: return self.objects.add(obj) #Translate by", "or p[0] >= self.width or p[1] < 0 or p[1] >= self.height: continue", "row unless last square was blocked: if blocked: break def is_connected(self, points): if", "x in xrange(x1, x2): yield (x, y, self.map[y][x]) def __getitem__(self, location): return self.get_tile(location[0],", "range(random.randrange(100,101)): if not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y):", "be translated by this amount\"\"\" self.x = x self.y = y def translate(self,", "by this amount\"\"\" self.x = x self.y = y def translate(self, x, y):", "or thing.block_sight: return True return False def get_fov(self, location): \"\"\"Get the set of", 
"self.name def __contains__(self, p): return p in self.points from village import VillageGenerator from", "connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1, points, connected) def add_object(self, obj): \"\"\"Add", "for x, y in points])) def get_regions(self, location): \"\"\"Get regions containing the given", "cy + dx * yx + dy * yy if p[0] < 0", "0) for obj in self.objects.copy(): obj.destroy() for y in xrange(self.height): for x in", "= [terrain] # TODO: Nothing specifies that there must be exactly one terrain", "r_slope: continue elif end > l_slope: break else: # Our light beam is", "= random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the level's origin; all terrain-drawing", "= height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and initialize map. Override with", "points self.update() def update(self): \"\"\"Recalculate derivable properties of the region\"\"\" if self.points: x", "for p in self.points))/len(self.points) y = sum((p[1] for p in self.points))/len(self.points) else: x", "region\"\"\" if self.points: x = sum((p[0] for p in self.points))/len(self.points) y = sum((p[1]", "= char self.name = name self.tiletype = 0 self.tileindex = index self.block_move =", "x >= self.width or y >= self.height: return if self.map[y][x]: self.map[y][x][0] = terrain", "else: return False def get_flood(self, x, y, points, connected): if (x,y) in points", "self.objects: return self.objects.add(obj) #Translate by our cursor coords - this should only happen", "self.objects: obj.level_setup_finished() knowledge = [f for facts in [obj.get_facts() for obj in self.objects]", "= terrain(p) if x < 0 or y < 0 or x >=", "cx + dx * xx + dy * xy, cy + dx *", "x = None y = None self.centre = (x, y) self.area = len(self.points)", "exactly one terrain # per tile, or even where it is in the", "id, light): \"\"\"Recursive lightcasting function, obtained from 
http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return", "0, 0, -1, -1, 0], [1, 0, 0, 1, -1, 0, 0, -1]]", "light it: dist_squared = dx**2 + dy**2 if dist_squared < radius_squared: light[p] =", "obj in self.objects: obj.level_setup_finished() knowledge = [f for facts in [obj.get_facts() for obj", "self.map = [] for y in xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([])", "self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all the stuff at", "mult = [[1, 0, 0, -1, -1, 0, 0, 1], [0, 1, -1,", "terrain else: self.map[y][x] = [terrain] # TODO: Nothing specifies that there must be", "True self._cast_light(cx, cy, j+1, start, l_slope, radius, xx, xy, yx, yy, id+1, light)", "in self.objects.copy(): obj.destroy() for y in xrange(self.height): for x in xrange(self.width): self.map[x][y] =", "+ dx * yx + dy * yy if p[0] < 0 or", "self.name = name self.level = level self.points = points self.update() def update(self): \"\"\"Recalculate", "r_slope # Row is scanned; do next row unless last square was blocked:", "do after level generation\"\"\" for obj in self.objects: obj.level_setup_finished() knowledge = [f for", "dx * yx + dy * yy if p[0] < 0 or p[0]", "_cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx, yy, id, light):", "dist_squared if blocked: # we're scanning a row of blocked squares: if self.block_sight(p):", "points])) def get_regions(self, location): \"\"\"Get regions containing the given location\"\"\" return [region for", "#Translate by our cursor coords - this should only happen during level generation.", "not points: return False connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected))", "l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope: continue elif end >", "is_connected(self, points): if not points: return False connected = [] 
self.get_flood(points[0][0], points[0][1], set(points),", "-1, -1, 0], [1, 0, 0, 1, -1, 0, 0, -1]] def __init__(self,", "j < radius: # This is a blocking square, start a child scan:", "last square was blocked: if blocked: break def is_connected(self, points): if not points:", "sum((p[0] for p in self.points))/len(self.points) y = sum((p[1] for p in self.points))/len(self.points) else:", "region.points = [(x+self.x, y+self.y) for x, y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name, self,", "= new_start else: if self.block_sight(p) and j < radius: # This is a", "-1, 0, 0, 1], [0, 1, -1, 0, 0, -1, 1, 0], [0,", "lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start < end: return radius_squared = radius*radius", "dy coordinates into map coordinates: p = cx + dx * xx +", "0 self.tileindex = index self.block_move = block_move self.block_sight = block_sight for key in", "def get_fov(self, location): \"\"\"Get the set of locations that can be seen from", "xy, cy + dx * yx + dy * yy if p[0] <", "scanning a row of blocked squares: if self.block_sight(p): new_start = r_slope continue else:", "p, terrain): x = p[0] + self.x y = p[1] + self.y if", "def done_setup(self): \"\"\"Things to do after level generation\"\"\" for obj in self.objects: obj.level_setup_finished()", "light = set((location,)) light = {location: 1} radius = 20 for oct in", "= [[1, 0, 0, -1, -1, 0, 0, 1], [0, 1, -1, 0,", "* xy, cy + dx * yx + dy * yy if p[0]", "container\"\"\" if not obj.container: return obj.location return self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x", "elif end > l_slope: break else: # Our light beam is touching this", "seen from the given location\"\"\" light = set((location,)) light = {location: 1} radius", "end > l_slope: break else: # Our light beam is touching this square;", "its container\"\"\" if not obj.container: return obj.location 
return self.get_coords_of(obj.container) def set_terrain(self, p, terrain):", "start, l_slope, radius, xx, xy, yx, yy, id+1, light) new_start = r_slope #", "= len(self.points) def __str__(self): return self.name def __contains__(self, p): return p in self.points", "light[p] = dist_squared if blocked: # we're scanning a row of blocked squares:", "if p[0] < 0 or p[0] >= self.width or p[1] < 0 or", "other): return other in self.objects class Region: def __init__(self, name, level, points): self.name", "\"\"\"Iterator for all the tiles in the given rectangle\"\"\" for y in xrange(y1,", "* yy if p[0] < 0 or p[0] >= self.width or p[1] <", "return self.name def __contains__(self, p): return p in self.points from village import VillageGenerator", "the tile's list. def block_sight(self, p): \"\"\"Return whether the tile at p blocks", "__getitem__(self, location): return self.get_tile(location[0], location[1]) if location else None def __contains__(self, other): return", "(x, y) self.area = len(self.points) def __str__(self): return self.name def __contains__(self, p): return", "for oct in range(8): self._cast_light( location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct],", "width, height): self.world = world self.terraintypes = TERRAINS self.objects = set() self.map =", "castle import CastleGenerator from fortress import FortressGenerator from actor import Villager import random", "< radius_squared: light[p] = dist_squared if blocked: # we're scanning a row of", "i in range(random.randrange(100,101)): if not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self,", "= {'#' : wall, '.' 
: floor} class Level: mult = [[1, 0,", "-1, -1, 0, 0, 1], [0, 1, -1, 0, 0, -1, 1, 0],", "location[0], location[1], 1, 1.0, 0.0, radius, self.mult[0][oct], self.mult[1][oct], self.mult[2][oct], self.mult[3][oct], 0, light) return", "= [] for y in xrange(height): self.map.append([]) for x in xrange(width): self.map[-1].append([]) self.width", "def get_regions_of(self, obj): \"\"\"Get regions containing given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj))", "def _cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx, yy, id,", "### Levels define the terrain making up an area TEST_LEVEL = ( \"##########\",", "self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of given object or its container\"\"\" if", "def bumped(self, other): return False wall = TerrainInfo('#', 'wall', (0,0), True, True) floor", "return False def get_flood(self, x, y, points, connected): if (x,y) in points and", "object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of given", "left and right # extremities of the square we're considering: l_slope, r_slope =", "y2): \"\"\"Iterator for all the tiles in the given rectangle\"\"\" for y in", "given rectangle\"\"\" for y in xrange(y1, y2): for x in xrange(x1, x2): yield", "else: x = None y = None self.centre = (x, y) self.area =", "True, True) floor = TerrainInfo(u'·', 'floor', (1,0), False, False) TERRAINS = {'#' :", "the region\"\"\" if self.points: x = sum((p[0] for p in self.points))/len(self.points) y =", "def set_terrain(self, p, terrain): x = p[0] + self.x y = p[1] +", "= level self.points = points self.update() def update(self): \"\"\"Recalculate derivable properties of the", "VillageGenerator from castle import CastleGenerator from fortress import FortressGenerator from actor import Villager", "that can be seen from the given location\"\"\" light = set((location,)) light =", "index 
self.block_move = block_move self.block_sight = block_sight for key in kwargs: setattr(self, key,", "xx, xy, yx, yy, id, light): \"\"\"Recursive lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if", "if location in region] def get_regions_of(self, obj): \"\"\"Get regions containing given object or", "[[1, 0, 0, -1, -1, 0, 0, 1], [0, 1, -1, 0, 0,", "translate=True): \"\"\"Add a region and translate by our cursor\"\"\" if translate: region.points =", "self.map[y][x] = [terrain] # TODO: Nothing specifies that there must be exactly one", "location): \"\"\"Get regions containing the given location\"\"\" return [region for region in self.regions", "row, start, end, radius, xx, xy, yx, yy, id, light): \"\"\"Recursive lightcasting function,", "start < end: return radius_squared = radius*radius for j in range(row, radius+1): dx,", "a row of blocked squares: if self.block_sight(p): new_start = r_slope continue else: blocked", "self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) == len(set(points)): return True else: return False", "if obj in self.objects: return self.objects.add(obj) #Translate by our cursor coords - this", "points: return False connected = [] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) ==", "if location else None def __contains__(self, other): return other in self.objects class Region:", "next row unless last square was blocked: if blocked: break def is_connected(self, points):", "if start < end: return radius_squared = radius*radius for j in range(row, radius+1):", "the tiles in the given rectangle\"\"\" for y in xrange(y1, y2): for x", "dy = -j-1, -j blocked = False while dx <= 0: dx +=", "= r_slope continue else: blocked = False start = new_start else: if self.block_sight(p)", "= TerrainInfo('v', 'road', (0,1), False, False) class TestLevel(Level): def setup(self): Level.setup(self) 
self.set_cursor(100,100) #VillageGenerator(self).generate()", "except (KeyError, IndexError): return None def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for", "scan: blocked = True self._cast_light(cx, cy, j+1, start, l_slope, radius, xx, xy, yx,", "facts in [obj.get_facts() for obj in self.objects] for f in facts] for npc", "block_sight, **kwargs): self.char = char self.name = name self.tiletype = 0 self.tileindex =", "wall, '.' : floor} class Level: mult = [[1, 0, 0, -1, -1,", "x in xrange(width): self.map[-1].append([]) self.width = width self.height = height #self.setup() #self.done_setup() def", "bumped(self, other): return False wall = TerrainInfo('#', 'wall', (0,0), True, True) floor =", "terrain-drawing will be translated by this amount\"\"\" self.x = x self.y = y", "the dx, dy coordinates into map coordinates: p = cx + dx *", "= False while dx <= 0: dx += 1 # Translate the dx,", "blocked squares: if self.block_sight(p): new_start = r_slope continue else: blocked = False start", "1, -1, 0, 0, -1]] def __init__(self, world, width, height): self.world = world", "Translate the dx, dy coordinates into map coordinates: p = cx + dx", "else: blocked = False start = new_start else: if self.block_sight(p) and j <", "self.regions = [] self.set_cursor(0,0) self.map = [] for y in xrange(height): self.map.append([]) for", "else: return self.get_flood(x+1, y, points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points,", "name, index, block_move, block_sight, **kwargs): self.char = char self.name = name self.tiletype =", "for obj in self.objects: obj.level_setup_finished() knowledge = [f for facts in [obj.get_facts() for", "return self.map[y][x] except (KeyError, IndexError): return None def get_tiles(self, x1, y1, x2, y2):", "if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] = [terrain] # TODO: Nothing specifies", "for key in kwargs: setattr(self, key, kwargs[key]) def bumped(self, other): return False wall", 
"translated by this amount\"\"\" self.x = x self.y = y def translate(self, x,", "x, y, points, connected): if (x,y) in points and (x,y) not in connected:", "p in self.points))/len(self.points) y = sum((p[1] for p in self.points))/len(self.points) else: x =", "set(points), connected) if len(set(connected)) == len(set(points)): return True else: return False def get_flood(self,", "happen during level generation. if obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location =", "+ dy * xy, cy + dx * yx + dy * yy", "yx, yy, id, light): \"\"\"Recursive lightcasting function, obtained from http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation\"\"\" if start <", "1, 0], [0, 1, 1, 0, 0, -1, -1, 0], [1, 0, 0,", "width self.height = height #self.setup() #self.done_setup() def setup(self): \"\"\"Clear objects and initialize map.", "tile's list. def block_sight(self, p): \"\"\"Return whether the tile at p blocks sight\"\"\"", "1, 0, 0, -1, -1, 0], [1, 0, 0, 1, -1, 0, 0,", "(dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start < r_slope: continue elif end > l_slope: break else:", "= None self.centre = (x, y) self.area = len(self.points) def __str__(self): return self.name", "dist_squared = dx**2 + dy**2 if dist_squared < radius_squared: light[p] = dist_squared if", "kwargs: setattr(self, key, kwargs[key]) def bumped(self, other): return False wall = TerrainInfo('#', 'wall',", "extremities of the square we're considering: l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5) if start", "== len(set(points)): return True else: return False def get_flood(self, x, y, points, connected):", "obj.move\"\"\" if obj.location: self[obj.location].remove(obj) if location: self[location].append(obj) def get_tile(self, x, y): \"\"\"Return all", "its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get coordinates of given object or", "= set((location,)) light = {location: 1} radius = 20 for oct in range(8):", "# 
l_slope and r_slope store the slopes of the left and right #", "a child scan: blocked = True self._cast_light(cx, cy, j+1, start, l_slope, radius, xx,", "= [] self.get_flood(points[0][0], points[0][1], set(points), connected) if len(set(connected)) == len(set(points)): return True else:", "0, 0, -1, 1, 0], [0, 1, 1, 0, 0, -1, -1, 0],", "self.block_sight(p) and j < radius: # This is a blocking square, start a", "location\"\"\" try: return self.map[y][x] except (KeyError, IndexError): return None def get_tiles(self, x1, y1,", "for x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def done_setup(self):", "0, 0, -1]] def __init__(self, world, width, height): self.world = world self.terraintypes =", "self.tiletype = 0 self.tileindex = index self.block_move = block_move self.block_sight = block_sight for", "xrange(self.height): for x in xrange(self.width): self.map[x][y] = [floor] self[self.world.player.location].append(self.world.player) self.regions = [] def", "specifies that there must be exactly one terrain # per tile, or even", "0, -1]] def __init__(self, world, width, height): self.world = world self.terraintypes = TERRAINS", "for obj in self.objects.copy(): obj.destroy() for y in xrange(self.height): for x in xrange(self.width):", "end: return radius_squared = radius*radius for j in range(row, radius+1): dx, dy =", "y+self.y) for x, y in points])) def get_regions(self, location): \"\"\"Get regions containing the", "None def get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for all the tiles in", "0: dx += 1 # Translate the dx, dy coordinates into map coordinates:", "= block_move self.block_sight = block_sight for key in kwargs: setattr(self, key, kwargs[key]) def", "not knowledge: break fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the", "level generation. 
if obj.location: x, y = obj.location self[(x,y)].append(obj) obj.location = (x+self.x, y+self.y)", "0, 0, 1], [0, 1, -1, 0, 0, -1, 1, 0], [0, 1,", "l_slope and r_slope store the slopes of the left and right # extremities", "level's list of objects\"\"\" if obj in self.objects: return self.objects.add(obj) #Translate by our", "kwargs[key]) def bumped(self, other): return False wall = TerrainInfo('#', 'wall', (0,0), True, True)", "the slopes of the left and right # extremities of the square we're", "obj): \"\"\"Get coordinates of given object or its container\"\"\" if not obj.container: return", "self.update() def update(self): \"\"\"Recalculate derivable properties of the region\"\"\" if self.points: x =", "be exactly one terrain # per tile, or even where it is in", "map coordinates: p = cx + dx * xx + dy * xy,", "get_tiles(self, x1, y1, x2, y2): \"\"\"Iterator for all the tiles in the given", "regions containing given object or its container\"\"\" return self.get_regions(self.get_coords_of(obj)) def get_coords_of(self, obj): \"\"\"Get", "in points and (x,y) not in connected: connected.append((x,y)) else: return self.get_flood(x+1, y, points,", "p[0] < 0 or p[0] >= self.width or p[1] < 0 or p[1]", "sum((p[1] for p in self.points))/len(self.points) else: x = None y = None self.centre", "radius_squared = radius*radius for j in range(row, radius+1): dx, dy = -j-1, -j", "return self.get_coords_of(obj.container) def set_terrain(self, p, terrain): x = p[0] + self.x y =", "self.width or p[1] < 0 or p[1] >= self.height: continue # l_slope and", "is scanned; do next row unless last square was blocked: if blocked: break", "y, points, connected) self.get_flood(x-1, y, points, connected) self.get_flood(x, y+1, points, connected) self.get_flood(x, y-1,", "(x+self.x, y+self.y) def remove_object(self, obj): \"\"\"Should only be called from obj.destroy()\"\"\" if obj", "fact = random.choice(knowledge) npc.knowledge.add(fact) def set_cursor(self, x, y): \"\"\"Set the level's origin; 
all", "world, width, height): self.world = world self.terraintypes = TERRAINS self.objects = set() self.map", "# Our light beam is touching this square; light it: dist_squared = dx**2", "rectangle\"\"\" for y in xrange(y1, y2): for x in xrange(x1, x2): yield (x,", "region in self.regions if location in region] def get_regions_of(self, obj): \"\"\"Get regions containing", "start a child scan: blocked = True self._cast_light(cx, cy, j+1, start, l_slope, radius,", "into map coordinates: p = cx + dx * xx + dy *", "False def get_flood(self, x, y, points, connected): if (x,y) in points and (x,y)", "light) return light def _cast_light(self, cx, cy, row, start, end, radius, xx, xy,", "this should only happen during level generation. if obj.location: x, y = obj.location", "if self.block_sight(p): new_start = r_slope continue else: blocked = False start = new_start", "wall = TerrainInfo('#', 'wall', (0,0), True, True) floor = TerrainInfo(u'·', 'floor', (1,0), False,", "other): return False wall = TerrainInfo('#', 'wall', (0,0), True, True) floor = TerrainInfo(u'·',", "translate: region.points = [(x+self.x, y+self.y) for x, y in region.points] self.regions.append(region) region.update() #self.regions.append(Region(name,", ">= self.height: return if self.map[y][x]: self.map[y][x][0] = terrain else: self.map[y][x] = [terrain] #" ]
[ "Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm) admin.site.register(Suggestion) admin.site.register(Instruction) admin.site.register(Company) admin.site.register(Prescription) admin.site.register(PrescribedMedicine) admin.site.register(PatientHistory)", "import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm) admin.site.register(Suggestion) admin.site.register(Instruction)", "Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm) admin.site.register(Suggestion) admin.site.register(Instruction) admin.site.register(Company)", "MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm) admin.site.register(Suggestion) admin.site.register(Instruction) admin.site.register(Company) admin.site.register(Prescription)", ".models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm) admin.site.register(Suggestion)", "admin from .models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine)", "import admin from .models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory", "django.contrib import admin from .models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine,", "Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm) 
admin.site.register(Suggestion) admin.site.register(Instruction) admin.site.register(Company) admin.site.register(Prescription) admin.site.register(PrescribedMedicine)", "from django.contrib import admin from .models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription,", "from .models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory admin.site.register(Medicine) admin.site.register(MedicineForm)" ]
[ "for line in file(\"test-vectors-00.txt\"): line = line.strip() if line == \"\" or line[0:1]", "len(v) == 1: return ord(v) return int(v[2:4], 16) def get_string(value): value = value.strip()", "== '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return None", "line == \"\" or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue", "matches: comps = m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]),", "== 1: return ord(v) return int(v[2:4], 16) def get_string(value): value = value.strip() if", "line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\")", "= line.strip() if line == \"\" or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\")", "re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return None raise Exception(\"unhandled\") Tests =", "if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests = [ ]", "output = \"\" for line in file(\"test-vectors-00.txt\"): line = line.strip() if line ==", "return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return None raise Exception(\"unhandled\")", "] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps = m[1:-1].split(\",\") test", "raise Exception(\"unhandled\") Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in", "if value.lower() == \"null\": return None raise Exception(\"unhandled\") Tests = [ ] matches", "m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2])", "in file(\"test-vectors-00.txt\"): line = 
line.strip() if line == \"\" or line[0:1] == \"#\":", "= get_string(comps[1]), output = get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"] = get_string(comps[3])", "if line == \"\" or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"):", "= dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2]) ) if", "def get_byte(v): if len(v) == 1: return ord(v) return int(v[2:4], 16) def get_string(value):", "[ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps = m[1:-1].split(\",\")", "test = dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2]) )", "if len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] =", ">= 4: test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] = comps[4].strip() if", "input = get_string(comps[1]), output = get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"] =", "value.strip() if value[0] == '\"' and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"',", "def get_string(value): value = value.strip() if value[0] == '\"' and value[-1] == '\"':", "and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\":", "5: test[\"flags\"] = comps[4].strip() if len(comps) >= 6: test[\"rc\"] = comps[5].strip() Tests.append(test) print", "line = line.strip() if line == \"\" or line[0:1] == \"#\": continue if", "value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return", "in matches: comps = m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1], input =", "4: test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] = comps[4].strip() if len(comps)", 
") if len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5: test[\"flags\"]", "= \"\" for line in file(\"test-vectors-00.txt\"): line = line.strip() if line == \"\"", "16) def get_string(value): value = value.strip() if value[0] == '\"' and value[-1] ==", "= [ ] def get_byte(v): if len(v) == 1: return ord(v) return int(v[2:4],", "'\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return None raise", "map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return None raise Exception(\"unhandled\") Tests", "continue output += line.replace(\"\\n\", \"\") Tests = [ ] def get_byte(v): if len(v)", "= [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps =", "matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps = m[1:-1].split(\",\") test =", "= m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output =", "== \"null\": return None raise Exception(\"unhandled\") Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\",", "dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2]) ) if len(comps)", "test[\"flags\"] = comps[4].strip() if len(comps) >= 6: test[\"rc\"] = comps[5].strip() Tests.append(test) print json.dumps(Tests)", "get_string(value): value = value.strip() if value[0] == '\"' and value[-1] == '\"': return", "output) for m in matches: comps = m[1:-1].split(\",\") test = dict( comment =", "ord(v) return int(v[2:4], 16) def get_string(value): value = value.strip() if value[0] == '\"'", "Tests = [ ] def get_byte(v): if len(v) == 1: return ord(v) return", "test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] = comps[4].strip() if len(comps) >=", "\"\") Tests = [ ] def get_byte(v): if len(v) == 1: return ord(v)", "if len(v) == 1: return ord(v) 
return int(v[2:4], 16) def get_string(value): value =", "1: return ord(v) return int(v[2:4], 16) def get_string(value): value = value.strip() if value[0]", "import re output = \"\" for line in file(\"test-vectors-00.txt\"): line = line.strip() if", "m in matches: comps = m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1], input", "return None raise Exception(\"unhandled\") Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for", "''))) if value.lower() == \"null\": return None raise Exception(\"unhandled\") Tests = [ ]", "comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"]", "comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2]) ) if len(comps) >=", "line.strip() if line == \"\" or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or", "\"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests =", "return int(v[2:4], 16) def get_string(value): value = value.strip() if value[0] == '\"' and", "import json import re output = \"\" for line in file(\"test-vectors-00.txt\"): line =", "= value.strip() if value[0] == '\"' and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\",", "output = get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if len(comps)", "line in file(\"test-vectors-00.txt\"): line = line.strip() if line == \"\" or line[0:1] ==", "Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps", "[ ] def get_byte(v): if len(v) == 1: return ord(v) return int(v[2:4], 16)", "\"\" or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output +=", "or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests = [ ] def get_byte(v):", 
"re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps = m[1:-1].split(\",\") test = dict( comment", "file(\"test-vectors-00.txt\"): line = line.strip() if line == \"\" or line[0:1] == \"#\": continue", "for m in matches: comps = m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1],", ">= 5: test[\"flags\"] = comps[4].strip() if len(comps) >= 6: test[\"rc\"] = comps[5].strip() Tests.append(test)", "== '\"' and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower()", "value[1:-1].replace('\"\"', ''))) if value.lower() == \"null\": return None raise Exception(\"unhandled\") Tests = [", "or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\",", "int(v[2:4], 16) def get_string(value): value = value.strip() if value[0] == '\"' and value[-1]", "get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5:", "= get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if len(comps) >=", "json import re output = \"\" for line in file(\"test-vectors-00.txt\"): line = line.strip()", "Exception(\"unhandled\") Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m in matches:", "= comps[0].strip()[1:-1], input = get_string(comps[1]), output = get_string(comps[2]) ) if len(comps) >= 4:", "continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests = [", "output += line.replace(\"\\n\", \"\") Tests = [ ] def get_byte(v): if len(v) ==", "return ord(v) return int(v[2:4], 16) def get_string(value): value = value.strip() if value[0] ==", "= re.findall(\"({(?:.|\\n)*?})\", output) for m in matches: comps = m[1:-1].split(\",\") test = dict(", "len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] = 
comps[4].strip()", "value[0] == '\"' and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if", "] def get_byte(v): if len(v) == 1: return ord(v) return int(v[2:4], 16) def", "value.lower() == \"null\": return None raise Exception(\"unhandled\") Tests = [ ] matches =", "line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests = [ ] def", "re output = \"\" for line in file(\"test-vectors-00.txt\"): line = line.strip() if line", "== \"\" or line[0:1] == \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output", "if len(comps) >= 5: test[\"flags\"] = comps[4].strip() if len(comps) >= 6: test[\"rc\"] =", "None raise Exception(\"unhandled\") Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output) for m", "get_byte(v): if len(v) == 1: return ord(v) return int(v[2:4], 16) def get_string(value): value", "line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests = [ ] def get_byte(v): if", "comps = m[1:-1].split(\",\") test = dict( comment = comps[0].strip()[1:-1], input = get_string(comps[1]), output", "\"null\": return None raise Exception(\"unhandled\") Tests = [ ] matches = re.findall(\"({(?:.|\\n)*?})\", output)", "if value[0] == '\"' and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', '')))", "== \"#\": continue if line.startswith(\"Josefsson\") or line.startswith(\"Internet-Draft\"): continue output += line.replace(\"\\n\", \"\") Tests", "\"\" for line in file(\"test-vectors-00.txt\"): line = line.strip() if line == \"\" or", "line.replace(\"\\n\", \"\") Tests = [ ] def get_byte(v): if len(v) == 1: return", "len(comps) >= 5: test[\"flags\"] = comps[4].strip() if len(comps) >= 6: test[\"rc\"] = comps[5].strip()", "value = value.strip() if value[0] == '\"' and value[-1] == '\"': return 
map(get_byte,", "'\"' and value[-1] == '\"': return map(get_byte, re.findall(\"(\\\\\\\\x[0-9a-fA-F]{2}|.)\", value[1:-1].replace('\"\"', ''))) if value.lower() ==", "+= line.replace(\"\\n\", \"\") Tests = [ ] def get_byte(v): if len(v) == 1:", "get_string(comps[1]), output = get_string(comps[2]) ) if len(comps) >= 4: test[\"profile\"] = get_string(comps[3]) if", "get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] = comps[4].strip() if len(comps) >= 6: test[\"rc\"]", "= get_string(comps[3]) if len(comps) >= 5: test[\"flags\"] = comps[4].strip() if len(comps) >= 6:" ]
[ "use \"\"\" LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata if ( options.dbType == \"sqlite3\"):", "in supported_DBS ): p.print_help() return try: f = open(options.filePath) except IOError: print \"No", "the log files to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\")", "prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The", "analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\",", "\"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message = message_match.group(1) else: message = \"CAN'T", "createEngine(options) multiLineMessage = False for line in f: line = line.strip() if (", "\"\"\" checks to see if the line starts with a date \"\"\" match", "Main entry point. 
\"\"\" p = optparse.OptionParser(description=\"Parses the log files to generate a", "% options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session def", "a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to", "session = Session() metadata.create_all(engine) return session def processFile(f, options): \"\"\" Process the file", "date = \"Invalid pattern\" level_match = level_pattern.match(line) if level_match: level = level_match.group(1) else:", "= sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:]", "options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session def processFile(f,", "a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ):", "end of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to see if the line", "= Session() metadata.create_all(engine) return session def processFile(f, options): \"\"\" Process the file and", "lineStartsWithDate( line ) ): pass else: logEntry = processLine( line ) session.add(logEntry) if", "level_match: level = level_match.group(1) else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match:", "except IOError: print \"No such file: %s\" % options.filePath raw_input(\"Press Enter to close", "= \"Invalid pattern\" level_match = level_pattern.match(line) if level_match: level = level_match.group(1) else: level", "meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), 
session_id=sessionId,", "such file: %s\" % options.filePath raw_input(\"Press Enter to close window\") return processFile(f, options)", "\"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments = p.parse_args() if ( not options.filePath", "= level_pattern.match(line) if level_match: level = level_match.group(1) else: level = \"UNKNOWN\" class_name_match =", "in f: line = line.strip() if ( not lineStartsWithDate( line ) ): pass", "None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le def main():", "= create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return", "= class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message =", "message_pattern.match(line) if message_match: message = message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match =", "insert into the DB \"\"\" session = createEngine(options) multiLineMessage = False for line", "options.verbose: print logEntry \"Commit at end of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks", "file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to see if the line starts with", ") ): pass else: logEntry = processLine( line ) session.add(logEntry) if options.verbose: print", "if the line starts with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line )", "= sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId = None meetingId_match = meetingId_pattern.match(line) if", "session def processFile(f, options): \"\"\" Process the file and insert into the DB", "sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId =", "currently supports \" + ' 
'.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose", "create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern", "' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments = p.parse_args()", "message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create", "False def processLine(line): \"\"\" Parse the line and create the entry to log", "\" + ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments", "at end of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to see if the", "for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\",", "files to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the", "import declarative_base supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern =", "= p.parse_args() if ( not options.filePath ): p.print_help() return if ( not options.dbType", "= re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else: return", "re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = 
re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\"", "message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le def main(): \"\"\" Main entry point.", "= re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options):", "and insert into the DB \"\"\" session = createEngine(options) multiLineMessage = False for", "sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern =", "starts with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line", ") session.add(logEntry) if options.verbose: print logEntry \"Commit at end of file\" session.commit() def", "see if the line starts with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line", "\"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports \" + ' '.join(supported_DBS )) p.add_option(\"--verbose\",", "not lineStartsWithDate( line ) ): pass else: logEntry = processLine( line ) session.add(logEntry)", "meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine for use \"\"\" LogEvent_table", "re from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base", "= \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else: class_name =", "else: sessionId = None meetingId_match = 
meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos", "Session() metadata.create_all(engine) return session def processFile(f, options): \"\"\" Process the file and insert", "def processLine(line): \"\"\" Parse the line and create the entry to log \"\"\"", "( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine)", "file and insert into the DB \"\"\" session = createEngine(options) multiLineMessage = False", "= re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern =", "return False def processLine(line): \"\"\" Parse the line and create the entry to", "): return True else: return False def processLine(line): \"\"\" Parse the line and", "engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine)", "(re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else: return False def processLine(line): \"\"\" Parse", "%s\" % options.filePath raw_input(\"Press Enter to close window\") return processFile(f, options) if __name__", "import time import re from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from", "options,arguments = p.parse_args() if ( not options.filePath ): p.print_help() return if ( not", "date_pattern.match(line) if date_match: date = date_match.group(1) else: date = \"Invalid pattern\" level_match =", "class_name_match = class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match", "p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the 
db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\",", "( not lineStartsWithDate( line ) ): pass else: logEntry = processLine( line )", "\"No such file: %s\" % options.filePath raw_input(\"Press Enter to close window\") return processFile(f,", "else: logEntry = processLine( line ) session.add(logEntry) if options.verbose: print logEntry \"Commit at", "return if ( not options.dbType in supported_DBS ): p.print_help() return try: f =", "re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine for use \"\"\" LogEvent_table = LogEvent.__table__", "create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session", "import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3']", ")) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments = p.parse_args() if (", "default=False, help=\"Enables verbose output\") options,arguments = p.parse_args() if ( not options.filePath ): p.print_help()", "log files to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies", "file: %s\" % options.filePath raw_input(\"Press Enter to close window\") return processFile(f, options) if", "date = date_match.group(1) else: date = \"Invalid pattern\" level_match = level_pattern.match(line) if level_match:", "le def main(): \"\"\" Main entry point. 
\"\"\" p = optparse.OptionParser(description=\"Parses the log", "else: date = \"Invalid pattern\" level_match = level_pattern.match(line) if level_match: level = level_match.group(1)", "raw_input(\"Press Enter to close window\") return processFile(f, options) if __name__ == \"__main__\": main()", "entry to log \"\"\" date_match = date_pattern.match(line) if date_match: date = date_match.group(1) else:", "DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\")", "% options.filePath raw_input(\"Press Enter to close window\") return processFile(f, options) if __name__ ==", "message_match = message_pattern.match(line) if message_match: message = message_match.group(1) else: message = \"CAN'T PARSE\"", "def main(): \"\"\" Main entry point. \"\"\" p = optparse.OptionParser(description=\"Parses the log files", "multiLineMessage = False for line in f: line = line.strip() if ( not", "from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern =", "message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId =", "output\") options,arguments = p.parse_args() if ( not options.filePath ): p.print_help() return if (", "supports \" + ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\")", "log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the db file\",", "class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message = message_match.group(1)", "level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else: class_name", "= 
None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le def", "\"\"\" Process the file and insert into the DB \"\"\" session = createEngine(options)", "the engine for use \"\"\" LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata if (", "<reponame>timvoet/log_parser #!/usr/bin/env python from orm.model import LogEvent import optparse import time import re", "= message_pattern.match(line) if message_match: message = message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match", "line = line.strip() if ( not lineStartsWithDate( line ) ): pass else: logEntry", "time import re from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative", "the line starts with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if", "= \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos =", "python from orm.model import LogEvent import optparse import time import re from sqlalchemy", "\"\"\" p = optparse.OptionParser(description=\"Parses the log files to generate a DB for analysis.\",", "line and create the entry to log \"\"\" date_match = date_pattern.match(line) if date_match:", "date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return", "if message_match: message = message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line)", "to store the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently", "p = optparse.OptionParser(description=\"Parses the log files to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\",", "p.print_help() return try: f = open(options.filePath) 
except IOError: print \"No such file: %s\"", "= class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match =", "match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else:", "line starts with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\",", "re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine for use \"\"\"", "sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId = None meetingId_match =", "sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine for", "engine for use \"\"\" LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata if ( options.dbType", "db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports \" +", "( not options.filePath ): p.print_help() return if ( not options.dbType in supported_DBS ):", "): p.print_help() return try: f = open(options.filePath) except IOError: print \"No such file:", "sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId = None", "line ) session.add(logEntry) if options.verbose: print logEntry \"Commit at end of file\" session.commit()", "sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS =", "\"\"\" Create the engine for use \"\"\" LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata", "\"\"\" LogEvent_table = LogEvent.__table__ metadata = 
LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine", "print \"No such file: %s\" % options.filePath raw_input(\"Press Enter to close window\") return", "logEntry \"Commit at end of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to see", "= ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern =", "\"Commit at end of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to see if", "class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def", "supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern", "sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session def processFile(f, options): \"\"\" Process the", "= meetingId) return le def main(): \"\"\" Main entry point. 
\"\"\" p =", "p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments = p.parse_args() if ( not", "session.commit() def lineStartsWithDate(line): \"\"\" checks to see if the line starts with a", "line.strip() if ( not lineStartsWithDate( line ) ): pass else: logEntry = processLine(", "metadata.create_all(engine) return session def processFile(f, options): \"\"\" Process the file and insert into", "import optparse import time import re from sqlalchemy import create_engine from sqlalchemy.orm import", "meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId =", "p.print_help() return if ( not options.dbType in supported_DBS ): p.print_help() return try: f", "message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos", "the file and insert into the DB \"\"\" session = createEngine(options) multiLineMessage =", "): p.print_help() return if ( not options.dbType in supported_DBS ): p.print_help() return try:", "default=\"sqlite3\", help=\"Database format, currently supports \" + ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\",", "= optparse.OptionParser(description=\"Parses the log files to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog", "from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*')", "optparse import time import re from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker", "import re from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import", "action=\"store\", default=\"sqlite3\", help=\"Database format, 
currently supports \" + ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\",", "if ( not options.filePath ): p.print_help() return if ( not options.dbType in supported_DBS", "message_match: message = message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if", "level_match = level_pattern.match(line) if level_match: level = level_match.group(1) else: level = \"UNKNOWN\" class_name_match", "sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId = None meetingId_match = meetingId_pattern.match(line) if meetingId_match:", "= meetingId[pos+1:] else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id =", "entry point. \"\"\" p = optparse.OptionParser(description=\"Parses the log files to generate a DB", "else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message = message_match.group(1) else:", "session_id=sessionId, meeting_id = meetingId) return le def main(): \"\"\" Main entry point. 
\"\"\"", "= LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose)", "= meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(),", "from orm.model import LogEvent import optparse import time import re from sqlalchemy import", "else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else:", "= level_match.group(1) else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name =", "analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\",", "format, currently supports \" + ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables", "== \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session =", "le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le def main(): \"\"\"", "class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message = message_match.group(1) else: message", "print logEntry \"Commit at end of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to", "log \"\"\" date_match = date_pattern.match(line) if date_match: date = date_match.group(1) else: date =", "the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the db", "return True else: return False def processLine(line): \"\"\" Parse the line and create", "if date_match: date = date_match.group(1) 
else: date = \"Invalid pattern\" level_match = level_pattern.match(line)", "generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file", "meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le", "return le def main(): \"\"\" Main entry point. \"\"\" p = optparse.OptionParser(description=\"Parses the", "not options.filePath ): p.print_help() return if ( not options.dbType in supported_DBS ): p.print_help()", "to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\",", "else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return", "def createEngine(options): \"\"\" Create the engine for use \"\"\" LogEvent_table = LogEvent.__table__ metadata", "lineStartsWithDate(line): \"\"\" checks to see if the line starts with a date \"\"\"", "'.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments = p.parse_args() if", "= sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId = None meetingId_match", "LogEvent import optparse import time import re from sqlalchemy import create_engine from sqlalchemy.orm", "p.parse_args() if ( not options.filePath ): p.print_help() return if ( not options.dbType in", "re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine", "= LogEvent.__table__ 
metadata = LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s'", "def processFile(f, options): \"\"\" Process the file and insert into the DB \"\"\"", "\"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\")", "LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine =", "the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports \"", "session.add(logEntry) if options.verbose: print logEntry \"Commit at end of file\" session.commit() def lineStartsWithDate(line):", "return session def processFile(f, options): \"\"\" Process the file and insert into the", "= createEngine(options) multiLineMessage = False for line in f: line = line.strip() if", "level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern", "LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session", "supported_DBS ): p.print_help() return try: f = open(options.filePath) except IOError: print \"No such", "sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)')", "\"\"\" Parse the line and create the entry to log \"\"\" date_match =", "re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else: return False", "): pass else: logEntry = processLine( line ) session.add(logEntry) if 
options.verbose: print logEntry", "meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else:", "the DB \"\"\" session = createEngine(options) multiLineMessage = False for line in f:", "else: message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1)", "import LogEvent import optparse import time import re from sqlalchemy import create_engine from", "if options.verbose: print logEntry \"Commit at end of file\" session.commit() def lineStartsWithDate(line): \"\"\"", "checks to see if the line starts with a date \"\"\" match =", "PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId", "try: f = open(options.filePath) except IOError: print \"No such file: %s\" % options.filePath", "date_match = date_pattern.match(line) if date_match: date = date_match.group(1) else: date = \"Invalid pattern\"", "options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session", "processFile(f, options): \"\"\" Process the file and insert into the DB \"\"\" session", "= date_match.group(1) else: date = \"Invalid pattern\" level_match = level_pattern.match(line) if level_match: level", "Parse the line and create the entry to log \"\"\" date_match = date_pattern.match(line)", "= None meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\")", "LogEvent.__table__ metadata = LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' %", "+ ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments =", "def lineStartsWithDate(line): \"\"\" 
checks to see if the line starts with a date", "<path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store", "else: return False def processLine(line): \"\"\" Parse the line and create the entry", "store the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports", "class_name_match: class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if message_match:", "class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line)", "logEntry = processLine( line ) session.add(logEntry) if options.verbose: print logEntry \"Commit at end", "date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern", "for use \"\"\" LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata if ( options.dbType ==", "metadata = LogEvent.metadata if ( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath,", "pattern\" level_match = level_pattern.match(line) if level_match: level = level_match.group(1) else: level = \"UNKNOWN\"", "sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern", "= re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine for use \"\"\" LogEvent_table 
=", "orm.model import LogEvent import optparse import time import re from sqlalchemy import create_engine", "--filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to", "meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId =", "if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else: return False def processLine(line): \"\"\"", "re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*')", "if ( not lineStartsWithDate( line ) ): pass else: logEntry = processLine( line", "level_match.group(1) else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1)", "line in f: line = line.strip() if ( not lineStartsWithDate( line ) ):", "\"\"\" Main entry point. 
\"\"\" p = optparse.OptionParser(description=\"Parses the log files to generate", "['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)')", "= LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le def main(): \"\"\" Main", "verbose output\") options,arguments = p.parse_args() if ( not options.filePath ): p.print_help() return if", "= open(options.filePath) except IOError: print \"No such file: %s\" % options.filePath raw_input(\"Press Enter", "sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:] else:", "level = level_match.group(1) else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name", "for line in f: line = line.strip() if ( not lineStartsWithDate( line )", "message = message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match:", "IOError: print \"No such file: %s\" % options.filePath raw_input(\"Press Enter to close window\")", "sessionId = sessionId[pos+1:] else: sessionId = None meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId", "Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session def processFile(f, options): \"\"\"", "p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports \" + ' '.join(supported_DBS ))", "= date_pattern.match(line) if date_match: date = date_match.group(1) else: date = \"Invalid pattern\" level_match", "not options.dbType in supported_DBS ): 
p.print_help() return try: f = open(options.filePath) except IOError:", "= line.strip() if ( not lineStartsWithDate( line ) ): pass else: logEntry =", "DB \"\"\" session = createEngine(options) multiLineMessage = False for line in f: line", "processLine( line ) session.add(logEntry) if options.verbose: print logEntry \"Commit at end of file\"", ") if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else: return False def processLine(line):", "sessionId = None meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos =", "options.filePath raw_input(\"Press Enter to close window\") return processFile(f, options) if __name__ == \"__main__\":", "= re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the", "meetingId[pos+1:] else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId)", "createEngine(options): \"\"\" Create the engine for use \"\"\" LogEvent_table = LogEvent.__table__ metadata =", "meetingId = meetingId[pos+1:] else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id", "if ( not options.dbType in supported_DBS ): p.print_help() return try: f = open(options.filePath)", "line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True else: return False def", "line ) ): pass else: logEntry = processLine( line ) session.add(logEntry) if options.verbose:", "to see if the line starts with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\",", "Process the file and insert into the DB \"\"\" session = createEngine(options) multiLineMessage", "pos = sessionId.index(\":\") sessionId 
= sessionId[pos+1:] else: sessionId = None meetingId_match = meetingId_pattern.match(line)", "meeting_id = meetingId) return le def main(): \"\"\" Main entry point. \"\"\" p", "help=\"Enables verbose output\") options,arguments = p.parse_args() if ( not options.filePath ): p.print_help() return", "\"UNKNOWN\" class_name_match = class_name_pattern.match(line) if class_name_match: class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\"", "open(options.filePath) except IOError: print \"No such file: %s\" % options.filePath raw_input(\"Press Enter to", "meetingId) return le def main(): \"\"\" Main entry point. \"\"\" p = optparse.OptionParser(description=\"Parses", "of file\" session.commit() def lineStartsWithDate(line): \"\"\" checks to see if the line starts", "None meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId", "meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId = None", "default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports \" + ' '.join(supported_DBS", "= message_match.group(1) else: message = \"CAN'T PARSE\" sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId", "session = createEngine(options) multiLineMessage = False for line in f: line = line.strip()", "import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS = ['sqlite3'] date_pattern = re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern", "main(): \"\"\" Main entry point. 
\"\"\" p = optparse.OptionParser(description=\"Parses the log files to", "options): \"\"\" Process the file and insert into the DB \"\"\" session =", "help=\"Database format, currently supports \" + ' '.join(supported_DBS )) p.add_option(\"--verbose\", \"-v\", action=\"store_true\", default=False,", "processLine(line): \"\"\" Parse the line and create the entry to log \"\"\" date_match", "True else: return False def processLine(line): \"\"\" Parse the line and create the", "the entry to log \"\"\" date_match = date_pattern.match(line) if date_match: date = date_match.group(1)", "= re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern =", "\"Invalid pattern\" level_match = level_pattern.match(line) if level_match: level = level_match.group(1) else: level =", "return try: f = open(options.filePath) except IOError: print \"No such file: %s\" %", "= meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId = None le", "\"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session = sessionmaker(bind=engine) session = Session()", "pass else: logEntry = processLine( line ) session.add(logEntry) if options.verbose: print logEntry \"Commit", "from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base supported_DBS", "sessionId[pos+1:] else: sessionId = None meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1)", "False for line in f: line = line.strip() if ( not lineStartsWithDate( line", "with a date \"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line )", "the line and create the entry to 
log \"\"\" date_match = date_pattern.match(line) if", "file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the db file\", default=\"tomcat_stats.db\")", "= False for line in f: line = line.strip() if ( not lineStartsWithDate(", "if ( options.dbType == \"sqlite3\"): engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose) Session =", "= meetingId_pattern.match(line) if meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:]", "action=\"store_true\", default=False, help=\"Enables verbose output\") options,arguments = p.parse_args() if ( not options.filePath ):", "LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId) return le def main(): \"\"\" Main entry", "file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format, currently supports \" + '", "usage=\"%prog --filePath <path_to_file>\") p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path", "options.filePath ): p.print_help() return if ( not options.dbType in supported_DBS ): p.print_help() return", "sessionId_match = sessionId_pattern.match(line) if sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId =", "re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*') message_pattern = re.compile(r'.*\\[.*\\]\\s-(.*)') sessionId_pattern = re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*')", "and create the entry to log \"\"\" date_match = date_pattern.match(line) if date_match: date", "action=\"store\", help=\"The path to store the 
db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\",", "f = open(options.filePath) except IOError: print \"No such file: %s\" % options.filePath raw_input(\"Press", "p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log file to analyze\") p.add_option(\"--dataStorePath\",\"-s\", action=\"store\", help=\"The path to store the", "\"\"\" session = createEngine(options) multiLineMessage = False for line in f: line =", "options.dbType in supported_DBS ): p.print_help() return try: f = open(options.filePath) except IOError: print", ") ): return True else: return False def processLine(line): \"\"\" Parse the line", "pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId = None le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(),", "= re.compile(r'.*\\[(sessionId:[A-Z0-9]*)\\].*') meetingId_pattern = re.compile(r'.*\\[(meetingId:[A-Z0-9]*)\\].*') def createEngine(options): \"\"\" Create the engine for use", "( not options.dbType in supported_DBS ): p.print_help() return try: f = open(options.filePath) except", "if sessionId_match: sessionId = sessionId_match.group(1) pos = sessionId.index(\":\") sessionId = sessionId[pos+1:] else: sessionId", "level_pattern.match(line) if level_match: level = level_match.group(1) else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line)", "date_match.group(1) else: date = \"Invalid pattern\" level_match = level_pattern.match(line) if level_match: level =", "meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId = None le =", "= processLine( line ) session.add(logEntry) if options.verbose: print logEntry \"Commit at end of", "#!/usr/bin/env python from orm.model import LogEvent import optparse import time import re from", "to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath <path_to_file>\") 
p.add_option(\"--filePath\",\"-f\",action=\"store\",help=\"specifies the log", "to log \"\"\" date_match = date_pattern.match(line) if date_match: date = date_match.group(1) else: date", "create the entry to log \"\"\" date_match = date_pattern.match(line) if date_match: date =", "point. \"\"\" p = optparse.OptionParser(description=\"Parses the log files to generate a DB for", "Create the engine for use \"\"\" LogEvent_table = LogEvent.__table__ metadata = LogEvent.metadata if", "= sessionId[pos+1:] else: sessionId = None meetingId_match = meetingId_pattern.match(line) if meetingId_match: meetingId =", "if meetingId_match: meetingId = meetingId_match.group(1) pos = meetingId.index(\":\") meetingId = meetingId[pos+1:] else: meetingId", "f: line = line.strip() if ( not lineStartsWithDate( line ) ): pass else:", "\"\"\" date_match = date_pattern.match(line) if date_match: date = date_match.group(1) else: date = \"Invalid", "= sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session def processFile(f, options): \"\"\" Process", "into the DB \"\"\" session = createEngine(options) multiLineMessage = False for line in", "help=\"The path to store the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database", "echo=options.verbose) Session = sessionmaker(bind=engine) session = Session() metadata.create_all(engine) return session def processFile(f, options):", "= \"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message = message_match.group(1) else: message =", "\"\"\" match = re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) if (re.search(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\", line ) ): return True", "if level_match: level = level_match.group(1) else: level = \"UNKNOWN\" class_name_match = class_name_pattern.match(line) if", "declarative_base supported_DBS = ['sqlite3'] date_pattern = 
re.compile(r'(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)') level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*') class_name_pattern = re.compile(r'.*\\[([a-zA-Z0-9]*\\.[a-zA-Z0-9\\.]*).*')", "date_match: date = date_match.group(1) else: date = \"Invalid pattern\" level_match = level_pattern.match(line) if", "optparse.OptionParser(description=\"Parses the log files to generate a DB for analysis.\", prog=\"log_analyzer\",version=\"0.1\", usage=\"%prog --filePath", "line ) ): return True else: return False def processLine(line): \"\"\" Parse the", "if class_name_match: class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if", "path to store the db file\", default=\"tomcat_stats.db\") p.add_option(\"--dbType\", \"-d\", action=\"store\", default=\"sqlite3\", help=\"Database format,", "class_name = class_name_match.group(1) else: class_name = \"UNKNOWN\" message_match = message_pattern.match(line) if message_match: message" ]
[ "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2) ) self.assertEqual( 1, comparator.compare(job2, job1) )", "order. \"\"\" job1 = Mock() job1.name.value = 'A' job2 = Mock() job2.name.value =", "Unless required by applicable law or agreed to in writing, software # distributed", "that the comparator will sort jobs in alphabetical order. \"\"\" job1 = Mock()", "See the # License for the specific language governing permissions and limitations #", "# under the License. \"\"\" from unittest import TestCase from unittest.mock import Mock", "'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2) ) self.assertEqual( 1, comparator.compare(job2, job1)", "\"License\"); you may # not use this file except in compliance with the", "are equal if they share the same name. \"\"\" job1 = Mock() job1.name.value", "Copyright 2022 Red Hat # # Licensed under the Apache License, Version 2.0", "cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks that", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "the License. You may obtain # a copy of the License at #", "= 'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2) ) self.assertEqual( 1, comparator.compare(job2,", "import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks that two", "law or agreed to in writing, software # distributed under the License is", "the comparator will sort jobs in alphabetical order. 
\"\"\" job1 = Mock() job1.name.value", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "job1 = Mock() job1.name.value = 'A' job2 = Mock() job2.name.value = 'B' comparator", "express or implied. See the # License for the specific language governing permissions", "they share the same name. \"\"\" job1 = Mock() job1.name.value = 'job' job2", "test_alphabetical_order(self): \"\"\"Checks that the comparator will sort jobs in alphabetical order. \"\"\" job1", "comparator will sort jobs in alphabetical order. \"\"\" job1 = Mock() job1.name.value =", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. See the # License for", "not use this file except in compliance with the License. You may obtain", "equal if they share the same name. \"\"\" job1 = Mock() job1.name.value =", "comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that the comparator will sort jobs in", "test_names_are_equal(self): \"\"\"Checks that two jobs are equal if they share the same name.", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "with the License. You may obtain # a copy of the License at", "\"\"\" from unittest import TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "the License. \"\"\" from unittest import TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs", "License for the specific language governing permissions and limitations # under the License.", "the specific language governing permissions and limitations # under the License. 
\"\"\" from", "2.0 (the \"License\"); you may # not use this file except in compliance", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that the comparator will sort", "Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self):", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "use this file except in compliance with the License. You may obtain #", "will sort jobs in alphabetical order. \"\"\" job1 = Mock() job1.name.value = 'A'", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that the comparator will sort jobs", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "compliance with the License. You may obtain # a copy of the License", "License, Version 2.0 (the \"License\"); you may # not use this file except", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "def test_alphabetical_order(self): \"\"\"Checks that the comparator will sort jobs in alphabetical order. \"\"\"", "def test_names_are_equal(self): \"\"\"Checks that two jobs are equal if they share the same", "limitations # under the License. \"\"\" from unittest import TestCase from unittest.mock import", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "implied. See the # License for the specific language governing permissions and limitations", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "OF ANY KIND, either express or implied. 
See the # License for the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "two jobs are equal if they share the same name. \"\"\" job1 =", "language governing permissions and limitations # under the License. \"\"\" from unittest import", "TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for", "job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self):", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that the comparator will", "\"\"\" job1 = Mock() job1.name.value = 'A' job2 = Mock() job2.name.value = 'B'", "unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\"", ":class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs are equal if they share", "you may # not use this file except in compliance with the License.", "agreed to in writing, software # distributed under the License is distributed on", "(the \"License\"); you may # not use this file except in compliance with", "governing permissions and limitations # under the License. \"\"\" from unittest import TestCase", "KIND, either express or implied. See the # License for the specific language", "may # not use this file except in compliance with the License. You", "from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`.", "either express or implied. See the # License for the specific language governing", "same name. 
\"\"\" job1 = Mock() job1.name.value = 'job' job2 = Mock() job2.name.value", "'job' job2 = Mock() job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1,", "# # Unless required by applicable law or agreed to in writing, software", "file except in compliance with the License. You may obtain # a copy", "# Copyright 2022 Red Hat # # Licensed under the Apache License, Version", "= Mock() job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2) )", "this file except in compliance with the License. You may obtain # a", "# Unless required by applicable law or agreed to in writing, software #", "Hat # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "= Mock() job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) )", "by applicable law or agreed to in writing, software # distributed under the", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "Mock() job1.name.value = 'job' job2 = Mock() job2.name.value = 'job' comparator = SortJobsByName()", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "or implied. See the # License for the specific language governing permissions and", "class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs are", "that two jobs are equal if they share the same name. \"\"\" job1", "= Mock() job1.name.value = 'job' job2 = Mock() job2.name.value = 'job' comparator =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "2022 Red Hat # # Licensed under the Apache License, Version 2.0 (the", "for :class:`SortJobsByName`. 
\"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs are equal if they", "job1.name.value = 'A' job2 = Mock() job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual(", "specific language governing permissions and limitations # under the License. \"\"\" from unittest", "\"\"\"Checks that the comparator will sort jobs in alphabetical order. \"\"\" job1 =", "job2 = Mock() job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2)", "License. You may obtain # a copy of the License at # #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "Mock() job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2) ) self.assertEqual(", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "= 'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks", "for the specific language governing permissions and limitations # under the License. \"\"\"", "comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that the", "\"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs are equal if they share the", "= 'A' job2 = Mock() job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual( -1,", "in alphabetical order. \"\"\" job1 = Mock() job1.name.value = 'A' job2 = Mock()", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "name. 
\"\"\" job1 = Mock() job1.name.value = 'job' job2 = Mock() job2.name.value =", "Mock() job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def", "Mock() job1.name.value = 'A' job2 = Mock() job2.name.value = 'B' comparator = SortJobsByName()", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "under the License. \"\"\" from unittest import TestCase from unittest.mock import Mock from", "job2) ) def test_alphabetical_order(self): \"\"\"Checks that the comparator will sort jobs in alphabetical", "ANY KIND, either express or implied. See the # License for the specific", "the # License for the specific language governing permissions and limitations # under", "except in compliance with the License. You may obtain # a copy of", "= SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that the comparator", "\"\"\" # Copyright 2022 Red Hat # # Licensed under the Apache License,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "job1 = Mock() job1.name.value = 'job' job2 = Mock() job2.name.value = 'job' comparator", "TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs are equal", "to in writing, software # distributed under the License is distributed on an", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"\"\" job1 = Mock() job1.name.value = 'job' job2 = Mock() job2.name.value = 'job'", "sort jobs in alphabetical order. 
\"\"\" job1 = Mock() job1.name.value = 'A' job2", "= 'job' job2 = Mock() job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual( 0,", "from unittest import TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class", "share the same name. \"\"\" job1 = Mock() job1.name.value = 'job' job2 =", "required by applicable law or agreed to in writing, software # distributed under", "import TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests", "= Mock() job1.name.value = 'A' job2 = Mock() job2.name.value = 'B' comparator =", "applicable law or agreed to in writing, software # distributed under the License", "SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs", "License. \"\"\" from unittest import TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import", "import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "OR CONDITIONS OF ANY KIND, either express or implied. See the # License", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "permissions and limitations # under the License. \"\"\" from unittest import TestCase from", "Red Hat # # Licensed under the Apache License, Version 2.0 (the \"License\");", "and limitations # under the License. \"\"\" from unittest import TestCase from unittest.mock", "job2 = Mock() job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2)", "\"\"\"Tests for :class:`SortJobsByName`. 
\"\"\" def test_names_are_equal(self): \"\"\"Checks that two jobs are equal if", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "in compliance with the License. You may obtain # a copy of the", "# not use this file except in compliance with the License. You may", "unittest import TestCase from unittest.mock import Mock from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase):", "or agreed to in writing, software # distributed under the License is distributed", "the same name. \"\"\" job1 = Mock() job1.name.value = 'job' job2 = Mock()", "# License for the specific language governing permissions and limitations # under the", "jobs are equal if they share the same name. \"\"\" job1 = Mock()", "alphabetical order. \"\"\" job1 = Mock() job1.name.value = 'A' job2 = Mock() job2.name.value", "job1.name.value = 'job' job2 = Mock() job2.name.value = 'job' comparator = SortJobsByName() self.assertEqual(", ") def test_alphabetical_order(self): \"\"\"Checks that the comparator will sort jobs in alphabetical order.", "'A' job2 = Mock() job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1,", "jobs in alphabetical order. \"\"\" job1 = Mock() job1.name.value = 'A' job2 =", "from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName class TestSortJobsByName(TestCase): \"\"\"Tests for :class:`SortJobsByName`. \"\"\" def test_names_are_equal(self): \"\"\"Checks", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "\"\"\"Checks that two jobs are equal if they share the same name. 
\"\"\"", "Version 2.0 (the \"License\"); you may # not use this file except in", "'job' comparator = SortJobsByName() self.assertEqual( 0, comparator.compare(job1, job2) ) def test_alphabetical_order(self): \"\"\"Checks that", "if they share the same name. \"\"\" job1 = Mock() job1.name.value = 'job'", "job2.name.value = 'B' comparator = SortJobsByName() self.assertEqual( -1, comparator.compare(job1, job2) ) self.assertEqual( 1," ]
[ "def pauli_generators(N, x_loc=None): \"\"\" Construct a list of strings of Pauli generators on", "Z) for item_idx in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"]", "must be >= 1 to construct generators.\") if x_loc is None: return [\"I\"", "times that state was observed, e.g. {'1001' : 24, '1000' : 36}, etc.", "pairs are computational basis states and number of times that state was observed,", "ValueError(\"Input for gray code construction must be a positive integer.\") if N ==", "s1]) or any([x != \"0\" and x != \"1\" for x in s2]):", "substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators", "1: raise ValueError(\"Number of Paulis must be >= 1 to construct generators.\") if", "substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = [] # Expand out the term into", "in s1]) or any([x != \"0\" and x != \"1\" for x in", "1: raise ValueError(f\"Strings {s1} and {s2} are not ordered in a gray code.\")", "[0, -1]])), \"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0, 0], [0,", "np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\"", "code must have the same length.\") if any([x != \"0\" and x !=", "that are not Paulis or P0/P1 projectors.\") prefactor = 1 / (2 **", "in sub_code] + [\"1\" + x for x in sub_code[::-1]] def find_flipped_bit(s1, s2):", "QubitOperator # Copy the sequence before making replacements substitution_seq = seq if len(seq)", "sequence.\") if any([x not in mats.keys() for x in seq]): raise ValueError(f\"Sequence {seq}", "= QubitOperator() for term in qubit_operators: full_operator += term return full_operator def pauli_generators(N,", "for x in s1]) or any([x != \"0\" and x != \"1\" for", "x, y: x*y, num_z_and_1) # Count number of +1 and -1 outcomes, i.e.", "[list(\"I\" * idx + \"Z\" + \"I\" * (N - idx - 2))", "0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\" : 
np.array(([[1, 0], [0, -1]])),", "number of times that state was observed, e.g. {'1001' : 24, '1000' :", "== \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\",", "<reponame>aemccoy/GrayCode-QubitEncoding import numpy as np from openfermion.ops import QubitOperator from itertools import product", "N == 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for", "elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and {s2} are not ordered in", "have two qubits, need to add I to the generator list if N", "= {} for basis_state in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] == '1'", "sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two adjacent elements in a gray code,", "ValueError(f\"Strings {s1} and {s2} are the same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings", "before making replacements substitution_seq = seq if len(seq) <= 0: raise ValueError(f\"Cannot expand", "\"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = [] #", "or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\"", "= sum(meas_results.values()) if any([op not in ['I', 'X', 'Y', 'Z'] for op in", "Base case return [\"0\", \"1\"] else: sub_code = gray_code(N-1) return [\"0\" + x", "# Copy the sequence before making replacements substitution_seq = seq if len(seq) <=", "range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and {s2} are the same.\")", "compute, e.g. \"ZZIZZ\" meas_results (Dict): A dictionary containing the results of an experiment", ": 36}, etc. Returns: The expectation value of pauli. 
\"\"\" pauli_list = list(pauli)", "def find_flipped_bit(s1, s2): \"\"\" For two adjacent elements in a gray code, determine", "Pauli based on the measurement outcomes observed in result. Parameters: pauli (string): A", "in pauli_list]): raise ValueError(\"Pauli string must consist only of I, X, Y, or", "num_z_and_1) # Count number of +1 and -1 outcomes, i.e. 0 and 1", "For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI',", "n_shots = sum(meas_results.values()) if any([op not in ['I', 'X', 'Y', 'Z'] for op", "only of I, X, Y, or Z.\") # Determine whether the computational basis", "= [list(\"I\" * idx + \"Z\" + \"I\" * (N - idx -", "meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else", "to compute, e.g. \"ZZIZZ\". Tensor products are computed from left to right here.", "string must consist only of I, X, Y, or Z.\") # Determine whether", "only in the slot # where we flipped a bit string_sums = [(int(s1[i])", "in gray code must have the same length.\") if any([x != \"0\" and", "expand_projector_sequence(seq): # Take a list of projectors, e.g. [\"P0\", \"P1\", \"X\"] and expand", "'X', 'Y', 'Z'] for op in pauli_list]): raise ValueError(\"Pauli string must consist only", "basis states and number of times that state was observed, e.g. {'1001' :", "eigenvalues = {} for basis_state in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] ==", "eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] ==", "# eigenstates of the Pauli in question. 
eigenvalues = {} for basis_state in", "= [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else 1 for", "for idx in range(N - 1)] # If we have two qubits, need", "X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli,", "expand empty projector sequence.\") if any([x not in mats.keys() for x in seq]):", "\"I\" * (N - idx - 2)) for idx in range(N - 1)]", "import product from functools import reduce mats = {\"I\" : np.eye(2), \"X\" :", "'Y', 'Z'] for op in pauli_list]): raise ValueError(\"Pauli string must consist only of", "for idx in range(N)] else: if x_loc < 0 or x_loc > N:", "bit string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))] if", "the computational basis states in meas_results are +1 or -1 # eigenstates of", "a positive integer.\") if N == 1: # Base case return [\"0\", \"1\"]", "measurement outcomes observed in result. Parameters: pauli (string): A string indicating the Pauli", "are the same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and {s2} are", "for term in qubit_operators: full_operator += term return full_operator def pauli_generators(N, x_loc=None): \"\"\"", "is None: return [\"I\" * idx + \"Z\" + \"I\" * (N -", "and label Paulis with their qubit indices qubit_operator_string = \"\" for qubit_idx in", "substitution_seq = seq if len(seq) <= 0: raise ValueError(f\"Cannot expand empty projector sequence.\")", "value we want to compute, e.g. \"ZZIZZ\". Tensor products are computed from left", "in meas_results are +1 or -1 # eigenstates of the Pauli in question.", "pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities and label Paulis with their qubit", "** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0 and P1 with 0.5 (1", "N qubits. 
If x_loc is set to an integer, then we will construct", "if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else 1 for bit_idx in", "idx - 2)) for idx in range(N - 1)] # If we have", "range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1) # Count number of +1", "for gray code construction must be a positive integer.\") if N == 1:", "int(s2[i])) % 2 for i in range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings", "= [\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators =", "of strings of Pauli generators on N qubits. If x_loc is set to", "sign = (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities and", "1)] # If we have two qubits, need to add I to the", "an openfermion QubitOperator # Copy the sequence before making replacements substitution_seq = seq", "!= \"1\" for x in s1]) or any([x != \"0\" and x !=", "Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute", "strings of Pauli generators on N qubits. If x_loc is set to an", "!= len(s2): raise ValueError(\"Strings compared in gray code must have the same length.\")", "# First, replace P0 and P1 with 0.5 (1 +- Z) for item_idx", "len(pauli_list) n_shots = sum(meas_results.values()) if any([op not in ['I', 'X', 'Y', 'Z'] for", "same length.\") if any([x != \"0\" and x != \"1\" for x in", "of times that state was observed, e.g. {'1001' : 24, '1000' : 36},", "and remove the m indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\",", "# If we have two qubits, need to add I to the generator", "we flipped a bit string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i", "gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli string and compute its", "code for traversing the N qubit states. 
\"\"\" if N <= 0 or", "qubit_operators: full_operator += term return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a list", "!= \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term", "qubit_operators = [] # Expand out the term into individual Paulis for pauli", "<= 0: raise ValueError(f\"Cannot expand empty projector sequence.\") if any([x not in mats.keys()", "seq]): raise ValueError(f\"Sequence {seq} contains elements that are not Paulis or P0/P1 projectors.\")", "openfermion.ops import QubitOperator from itertools import product from functools import reduce mats =", "\"Z\" + \"I\" * (N - idx - 1) for idx in range(N)]", "mats = {\"I\" : np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])), \"Y\" :", "construction must be a positive integer.\") if N == 1: # Base case", "qubit is set to X and the remaining qubits contain the generators of", "indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities", "base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli string and compute its matrix representation.", "= list(pauli) if any([op not in ['I', 'X', 'Y', 'Z'] for op in", "of Paulis # return an openfermion QubitOperator # Copy the sequence before making", "the sign and remove the m indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string", "num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == -1]) return (num_0_outcomes", "compute, e.g. \"ZZIZZ\". Tensor products are computed from left to right here. 
\"\"\"", "where we flipped a bit string_sums = [(int(s1[i]) + int(s2[i])) % 2 for", "raise ValueError(\"Number of Paulis must be >= 1 to construct generators.\") if x_loc", "1 num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes", "or len(s2) == 0: raise ValueError(\"Empty string inputted.\") if len(s1) != len(s2): raise", "P0/P1 projectors.\") prefactor = 1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First,", "computed from left to right here. \"\"\" pauli_list = list(pauli) if any([op not", "def expand_projector_sequence(seq): # Take a list of projectors, e.g. [\"P0\", \"P1\", \"X\"] and", "sum(meas_results.values()) if any([op not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]):", "key in eigenvalues.keys() if eigenvalues[key] == -1]) return (num_0_outcomes - num_1_outcomes) / n_shots", "# Base case return [\"0\", \"1\"] else: sub_code = gray_code(N-1) return [\"0\" +", "{s1} and {s2} are the same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1}", "'Z'] for op in pauli_list]): raise ValueError(\"Pauli string must consist only of I,", "or P0/P1 projectors.\") prefactor = 1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) #", "if x_loc < 0 or x_loc > N: raise ValueError(f\"Invalid placement ({x_loc}) X", "x in sub_code] + [\"1\" + x for x in sub_code[::-1]] def find_flipped_bit(s1,", "full_operator = QubitOperator() for term in qubit_operators: full_operator += term return full_operator def", "0], [0, -1]])), \"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0, 0],", "compared in gray code must have the same length.\") if any([x != \"0\"", "whose expectation value we want to compute, e.g. \"ZZIZZ\". 
Tensor products are computed", "eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1) # Count number of +1 and", "for op in pauli_list]): raise ValueError(\"Pauli string must consist only of I, X,", "<= 0 or type(N) is not int: raise ValueError(\"Input for gray code construction", "in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the sign and remove the m", "sub_code = gray_code(N-1) return [\"0\" + x for x in sub_code] + [\"1\"", "\"Z\" + \"I\" * (N - idx - 2)) for idx in range(N", "{s2} is not a valid binary string.\") # Sum the strings elementwise modulo", "The expectation value of pauli. \"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots", "\"\"\" Generate a Gray code for traversing the N qubit states. \"\"\" if", "itertools import product from functools import reduce mats = {\"I\" : np.eye(2), \"X\"", "[0, 0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\" Generate a", "\"\"\" if N <= 0 or type(N) is not int: raise ValueError(\"Input for", "gray code construction must be a positive integer.\") if N == 1: #", "generators on N qubits. If x_loc is set to an integer, then we", "Tensor products are computed from left to right here. \"\"\" pauli_list = list(pauli)", "0 and 1 num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] ==", "positive integer.\") if N == 1: # Base case return [\"0\", \"1\"] else:", "return [\"0\" + x for x in sub_code] + [\"1\" + x for", "if len(seq) <= 0: raise ValueError(f\"Cannot expand empty projector sequence.\") if any([x not", "in result. Parameters: pauli (string): A string indicating the Pauli whose expectation value", "containing the results of an experiment run on qiskit. The key value pairs", "string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))] if string_sums.count(1)", "here. 
\"\"\" pauli_list = list(pauli) if any([op not in ['I', 'X', 'Y', 'Z']", "return the expectation value of a given Pauli based on the measurement outcomes", "2 for i in range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and", "- 2)) for idx in range(N - 1)] # If we have two", "'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N < 1:", "= (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities and label", "- 1)] # If we have two qubits, need to add I to", "in ['I', 'X', 'Y', 'Z'] for op in pauli_list]): raise ValueError(\"Pauli string must", "ValueError(f\"Sequence {seq} contains elements that are not Paulis or P0/P1 projectors.\") prefactor =", "set to X and the remaining qubits contain the generators of N -", "0], [0, 1]]))} def gray_code(N): \"\"\" Generate a Gray code for traversing the", "\"I\" * (N - idx - 1) for idx in range(N)] else: if", "pauli_string.replace(\"m\", \"\") # Remove identities and label Paulis with their qubit indices qubit_operator_string", "base_generators = [list(\"I\" * idx + \"Z\" + \"I\" * (N - idx", "\"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term in", "x*y, num_z_and_1) # Count number of +1 and -1 outcomes, i.e. 
0 and", "raise ValueError(\"Pauli string must consist only of I, X, Y, or Z.\") #", "in range(N)] else: if x_loc < 0 or x_loc > N: raise ValueError(f\"Invalid", "for x in sub_code] + [\"1\" + x for x in sub_code[::-1]] def", "pauli (string): A string indicating the Pauli whose expectation value we want to", "x_loc < 0 or x_loc > N: raise ValueError(f\"Invalid placement ({x_loc}) X in", "need to add I to the generator list if N == 2: base_generators.append([\"I\"])", "[\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = []", "2; the sum will be 1 only in the slot # where we", "on the measurement outcomes observed in result. Parameters: pauli (string): A string indicating", "run on qiskit. The key value pairs are computational basis states and number", "* (N - idx - 2)) for idx in range(N - 1)] #", "qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string +=", "and expand it in terms of Paulis # return an openfermion QubitOperator #", "elements that are not Paulis or P0/P1 projectors.\") prefactor = 1 / (2", "Take a Pauli string and compute its matrix representation. Parameters: pauli (string): A", "one that was flipped. \"\"\" if len(s1) == 0 or len(s2) == 0:", "pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the expectation value of a given Pauli", "** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities and label Paulis with", "Pauli.\") base_generators = [list(\"I\" * idx + \"Z\" + \"I\" * (N -", "gray_code(N-1) return [\"0\" + x for x in sub_code] + [\"1\" + x", "as np from openfermion.ops import QubitOperator from itertools import product from functools import", "a list of projectors, e.g. 
[\"P0\", \"P1\", \"X\"] and expand it in terms", "pauli_list[bit_idx] != 'I') else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x,", "placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators = [list(\"I\" * idx + \"Z\"", "experiment run on qiskit. The key value pairs are computational basis states and", "'1' and pauli_list[bit_idx] != 'I') else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] =", "is set to X and the remaining qubits contain the generators of N", "generators.\") if x_loc is None: return [\"I\" * idx + \"Z\" + \"I\"", "for key in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for key", "import numpy as np from openfermion.ops import QubitOperator from itertools import product from", "and {s2} are the same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and", "X, Y, or Z.\") # Determine whether the computational basis states in meas_results", "X in {N}-qubit Pauli.\") base_generators = [list(\"I\" * idx + \"Z\" + \"I\"", "must have the same length.\") if any([x != \"0\" and x != \"1\"", "out the term into individual Paulis for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli)", "ValueError(\"Empty string inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings compared in gray code", "P1 with 0.5 (1 +- Z) for item_idx in range(len(seq)): if seq[item_idx] ==", "pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the expectation value of a", "we want to compute, e.g. \"ZZIZZ\". Tensor products are computed from left to", "code, determine which bit is the one that was flipped. 
\"\"\" if len(s1)", "[1, 0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\" : np.array(([[1, 0], [0,", "on N qubits where the x_loc qubit is set to X and the", "[mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the", "\" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term in qubit_operators: full_operator += term", "in qubit_operators: full_operator += term return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a", "term return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a list of strings of", "and {s2} are not ordered in a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq):", "to an integer, then we will construct the generators on N qubits where", "\"ZZIZZ\" meas_results (Dict): A dictionary containing the results of an experiment run on", "the remaining qubits contain the generators of N - 1 qubits. For example,", "> N: raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators = [list(\"I\"", "string inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings compared in gray code must", "x_loc is None: return [\"I\" * idx + \"Z\" + \"I\" * (N", "e.g. {'1001' : 24, '1000' : 36}, etc. Returns: The expectation value of", "range(N - 1)] # If we have two qubits, need to add I", "remove the m indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\")", "np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0,", "raise ValueError(\"Strings compared in gray code must have the same length.\") if any([x", "elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = [] # Expand", "dictionary containing the results of an experiment run on qiskit. The key value", "question. 
eigenvalues = {} for basis_state in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx]", "pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the sign and remove the", "# Extract the sign and remove the m indicators sign = (-1) **", "{} for basis_state in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and", "range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] == \"P1\":", "== 0: raise ValueError(\"Empty string inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings compared", "A dictionary containing the results of an experiment run on qiskit. The key", "-1]])), \"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))}", "inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings compared in gray code must have", "i.e. 0 and 1 num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key]", "+ [\"1\" + x for x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For", "contains elements that are not Paulis or P0/P1 projectors.\") prefactor = 1 /", "+= f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term in qubit_operators: full_operator", "expectation value we want to compute, e.g. \"ZZIZZ\". Tensor products are computed from", "from itertools import product from functools import reduce mats = {\"I\" : np.eye(2),", "idx + \"Z\" + \"I\" * (N - idx - 2)) for idx", ": 24, '1000' : 36}, etc. Returns: The expectation value of pauli. 
\"\"\"", "numpy as np from openfermion.ops import QubitOperator from itertools import product from functools", "is not a valid binary string.\") # Sum the strings elementwise modulo 2;", "(N - idx - 1) for idx in range(N)] else: if x_loc <", "+ x for x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two adjacent", "states in meas_results are +1 or -1 # eigenstates of the Pauli in", "to right here. \"\"\" pauli_list = list(pauli) if any([op not in ['I', 'X',", "[\"1\" + x for x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two", "for item_idx in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif", "flipped a bit string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in", "range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator =", "Pauli whose expectation value we want to compute, e.g. \"ZZIZZ\" meas_results (Dict): A", "bit is the one that was flipped. \"\"\" if len(s1) == 0 or", "\"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\"", "or type(N) is not int: raise ValueError(\"Input for gray code construction must be", "qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term in qubit_operators: full_operator += term return", "must consist only of I, X, Y, or Z.\") # Determine whether the", "ValueError(\"Number of Paulis must be >= 1 to construct generators.\") if x_loc is", "the m indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") #", "0], [0, 0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\" Generate", "the Pauli whose expectation value we want to compute, e.g. 
\"ZZIZZ\" meas_results (Dict):", "to construct generators.\") if x_loc is None: return [\"I\" * idx + \"Z\"", "for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string,", "-1j], [1j, 0]])), \"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1, 0],", "# Expand out the term into individual Paulis for pauli in product(*substitution_seq): pauli_string", "qubits. If x_loc is set to an integer, then we will construct the", "['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N", "of Paulis must be >= 1 to construct generators.\") if x_loc is None:", "\"X\" : np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\"", "# Remove identities and label Paulis with their qubit indices qubit_operator_string = \"\"", "< 1: raise ValueError(\"Number of Paulis must be >= 1 to construct generators.\")", "the same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and {s2} are not", "m indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove", "None: return [\"I\" * idx + \"Z\" + \"I\" * (N - idx", "\"\"\" Compute and return the expectation value of a given Pauli based on", "any([op not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]): raise ValueError(\"Pauli", "+ x for x in sub_code] + [\"1\" + x for x in", "flipped. \"\"\" if len(s1) == 0 or len(s2) == 0: raise ValueError(\"Empty string", "e.g. [\"P0\", \"P1\", \"X\"] and expand it in terms of Paulis # return", "['ZIXI', 'IZXI', IIXZ'] \"\"\" if N < 1: raise ValueError(\"Number of Paulis must", "a given Pauli based on the measurement outcomes observed in result. 
Parameters: pauli", "term into individual Paulis for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract", "string.\") # Sum the strings elementwise modulo 2; the sum will be 1", "and 1 num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == 1])", "in the slot # where we flipped a bit string_sums = [(int(s1[i]) +", "base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in base_generators]", "(1 +- Z) for item_idx in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] =", "based on the measurement outcomes observed in result. Parameters: pauli (string): A string", "np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\" : np.array(([[1,", "string indicating the Pauli whose expectation value we want to compute, e.g. \"ZZIZZ\"", "1 only in the slot # where we flipped a bit string_sums =", "Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results):", "or x_loc > N: raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators", "= \"\".join(pauli) # Extract the sign and remove the m indicators sign =", "idx in range(N)] else: if x_loc < 0 or x_loc > N: raise", "the generators of N - 1 qubits. For example, pauli_generators(4) = ['ZIII', 'IZII',", "[\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli string and", "gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take a list of projectors, e.g.", "the generator list if N == 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc,", "in question. eigenvalues = {} for basis_state in meas_results.keys(): num_z_and_1 = [-1 if", "the Pauli whose expectation value we want to compute, e.g. \"ZZIZZ\". 
Tensor products", "\"\"\" For two adjacent elements in a gray code, determine which bit is", "etc. Returns: The expectation value of pauli. \"\"\" pauli_list = list(pauli) n_qubits =", "- 1) for idx in range(N)] else: if x_loc < 0 or x_loc", "Gray code for traversing the N qubit states. \"\"\" if N <= 0", "be >= 1 to construct generators.\") if x_loc is None: return [\"I\" *", "states and number of times that state was observed, e.g. {'1001' : 24,", "num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes =", "from openfermion.ops import QubitOperator from itertools import product from functools import reduce mats", ": np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\" :", "in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys()", "\"mZ\"] qubit_operators = [] # Expand out the term into individual Paulis for", "products are computed from left to right here. \"\"\" pauli_list = list(pauli) if", "Returns: The expectation value of pauli. \"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list)", "['I', 'X', 'Y', 'Z'] for op in pauli_list]): raise ValueError(\"Pauli string must consist", "will be 1 only in the slot # where we flipped a bit", "not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]): raise ValueError(\"Pauli string", "QubitOperator() for term in qubit_operators: full_operator += term return full_operator def pauli_generators(N, x_loc=None):", "an integer, then we will construct the generators on N qubits where the", "their qubit indices qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] !=", "list of strings of Pauli generators on N qubits. If x_loc is set", "from left to right here. \"\"\" pauli_list = list(pauli) if any([op not in", "remaining qubits contain the generators of N - 1 qubits. 
For example, pauli_generators(4)", "# where we flipped a bit string_sums = [(int(s1[i]) + int(s2[i])) % 2", "the results of an experiment run on qiskit. The key value pairs are", "label Paulis with their qubit indices qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)):", "compute its matrix representation. Parameters: pauli (string): A string indicating the Pauli whose", "for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the expectation", "Paulis or P0/P1 projectors.\") prefactor = 1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\")))", "and pauli_list[bit_idx] != 'I') else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda", "x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two adjacent elements in a", "in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I')", "functools import reduce mats = {\"I\" : np.eye(2), \"X\" : np.array(([[0, 1], [1,", "raise ValueError(f\"One of inputs {s1}, {s2} is not a valid binary string.\") #", "x in s2]): raise ValueError(f\"One of inputs {s1}, {s2} is not a valid", "expectation value of a given Pauli based on the measurement outcomes observed in", "projectors.\") prefactor = 1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace", "of I, X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list])", "are not Paulis or P0/P1 projectors.\") prefactor = 1 / (2 ** (substitution_seq.count(\"P0\")", "for x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two adjacent elements in", "= 1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0 and", "qubits contain the generators of N - 1 qubits. 
For example, pauli_generators(4) =", "raise ValueError(\"Empty string inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings compared in gray", "in {N}-qubit Pauli.\") base_generators = [list(\"I\" * idx + \"Z\" + \"I\" *", "== 0: raise ValueError(f\"Strings {s1} and {s2} are the same.\") elif string_sums.count(1) >", "qubit states. \"\"\" if N <= 0 or type(N) is not int: raise", "(-1) ** pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities and label Paulis", "# Count number of +1 and -1 outcomes, i.e. 0 and 1 num_0_outcomes", "Take a list of projectors, e.g. [\"P0\", \"P1\", \"X\"] and expand it in", "was flipped. \"\"\" if len(s1) == 0 or len(s2) == 0: raise ValueError(\"Empty", "gray_code(N): \"\"\" Generate a Gray code for traversing the N qubit states. \"\"\"", "an experiment run on qiskit. The key value pairs are computational basis states", "individual Paulis for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the sign", "or Z.\") # Determine whether the computational basis states in meas_results are +1", "+ int(s2[i])) % 2 for i in range(len(s1))] if string_sums.count(1) == 0: raise", "we want to compute, e.g. \"ZZIZZ\" meas_results (Dict): A dictionary containing the results", "raise ValueError(\"Input for gray code construction must be a positive integer.\") if N", "idx + \"Z\" + \"I\" * (N - idx - 1) for idx", "qubits. For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI',", "not int: raise ValueError(\"Input for gray code construction must be a positive integer.\")", "x_loc=None): \"\"\" Construct a list of strings of Pauli generators on N qubits.", "computational basis states and number of times that state was observed, e.g. {'1001'", "contain the generators of N - 1 qubits. 
For example, pauli_generators(4) = ['ZIII',", "qubits where the x_loc qubit is set to X and the remaining qubits", "Pauli string and compute its matrix representation. Parameters: pauli (string): A string indicating", "For two adjacent elements in a gray code, determine which bit is the", "Z.\") # Determine whether the computational basis states in meas_results are +1 or", "Expand out the term into individual Paulis for pauli in product(*substitution_seq): pauli_string =", "and x != \"1\" for x in s1]) or any([x != \"0\" and", "and P1 with 0.5 (1 +- Z) for item_idx in range(len(seq)): if seq[item_idx]", "generators on N qubits where the x_loc qubit is set to X and", "(substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0 and P1 with 0.5 (1 +-", "if any([op not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]): raise", "the sequence before making replacements substitution_seq = seq if len(seq) <= 0: raise", "raise ValueError(\"Pauli string must consist only of I, X, Y, or Z.\") return", "pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for", "[-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else 1 for bit_idx", "its matrix representation. Parameters: pauli (string): A string indicating the Pauli whose expectation", "ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators = [list(\"I\" * idx +", "{'1001' : 24, '1000' : 36}, etc. 
Returns: The expectation value of pauli.", "construct the generators on N qubits where the x_loc qubit is set to", "range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take", "x != \"1\" for x in s1]) or any([x != \"0\" and x", "return [\"I\" * idx + \"Z\" + \"I\" * (N - idx -", "s2): \"\"\" For two adjacent elements in a gray code, determine which bit", "to compute, e.g. \"ZZIZZ\" meas_results (Dict): A dictionary containing the results of an", "result. Parameters: pauli (string): A string indicating the Pauli whose expectation value we", "-1 outcomes, i.e. 0 and 1 num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys()", "/ (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0 and P1 with", "find_flipped_bit(s1, s2): \"\"\" For two adjacent elements in a gray code, determine which", "{seq} contains elements that are not Paulis or P0/P1 projectors.\") prefactor = 1", "Paulis for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the sign and", "in s2]): raise ValueError(f\"One of inputs {s1}, {s2} is not a valid binary", "of the Pauli in question. eigenvalues = {} for basis_state in meas_results.keys(): num_z_and_1", "= sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key]", "of projectors, e.g. 
[\"P0\", \"P1\", \"X\"] and expand it in terms of Paulis", "basis_state in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] !=", "modulo 2; the sum will be 1 only in the slot # where", "if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key]", "'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N <", "Paulis # return an openfermion QubitOperator # Copy the sequence before making replacements", "in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two adjacent elements in a gray", "gray code must have the same length.\") if any([x != \"0\" and x", "= {\"I\" : np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0,", "idx - 1) for idx in range(N)] else: if x_loc < 0 or", "> 1: raise ValueError(f\"Strings {s1} and {s2} are not ordered in a gray", "full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a list of strings of Pauli generators", "seq if len(seq) <= 0: raise ValueError(f\"Cannot expand empty projector sequence.\") if any([x", "[1j, 0]])), \"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1, 0], [0,", "= pauli_string.replace(\"m\", \"\") # Remove identities and label Paulis with their qubit indices", "two qubits, need to add I to the generator list if N ==", "e.g. \"ZZIZZ\". Tensor products are computed from left to right here. 
\"\"\" pauli_list", "list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if any([op not in ['I', 'X',", "of I, X, Y, or Z.\") # Determine whether the computational basis states", "not a valid binary string.\") # Sum the strings elementwise modulo 2; the", "\"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"]", "for gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli string and compute", "\"\"\" Take a Pauli string and compute its matrix representation. Parameters: pauli (string):", "# Sum the strings elementwise modulo 2; the sum will be 1 only", "(basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else 1 for bit_idx in range(n_qubits)]", "reduce mats = {\"I\" : np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])), \"Y\"", "be 1 only in the slot # where we flipped a bit string_sums", "valid binary string.\") # Sum the strings elementwise modulo 2; the sum will", "* idx + \"Z\" + \"I\" * (N - idx - 2)) for", "the N qubit states. \"\"\" if N <= 0 or type(N) is not", "meas_results are +1 or -1 # eigenstates of the Pauli in question. eigenvalues", "+ substitution_seq.count(\"P1\"))) # First, replace P0 and P1 with 0.5 (1 +- Z)", "< 0 or x_loc > N: raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit", "ValueError(\"Pauli string must consist only of I, X, Y, or Z.\") return reduce(np.kron,", "from functools import reduce mats = {\"I\" : np.eye(2), \"X\" : np.array(([[0, 1],", "ordered in a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take a list", "ValueError(f\"Cannot expand empty projector sequence.\") if any([x not in mats.keys() for x in", "Paulis must be >= 1 to construct generators.\") if x_loc is None: return", "of pauli. 
\"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if", "string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and {s2} are not ordered in a", "return string_sums.index(1) def expand_projector_sequence(seq): # Take a list of projectors, e.g. [\"P0\", \"P1\",", "are +1 or -1 # eigenstates of the Pauli in question. eigenvalues =", "[\"I\" * idx + \"Z\" + \"I\" * (N - idx - 1)", "Count number of +1 and -1 outcomes, i.e. 0 and 1 num_0_outcomes =", "to add I to the generator list if N == 2: base_generators.append([\"I\"]) for", "N: raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators = [list(\"I\" *", "whose expectation value we want to compute, e.g. \"ZZIZZ\" meas_results (Dict): A dictionary", "in a gray code, determine which bit is the one that was flipped.", "({x_loc}) X in {N}-qubit Pauli.\") base_generators = [list(\"I\" * idx + \"Z\" +", "matrix representation. Parameters: pauli (string): A string indicating the Pauli whose expectation value", "the Pauli in question. eigenvalues = {} for basis_state in meas_results.keys(): num_z_and_1 =", "0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\" Generate a Gray", "generators of N - 1 qubits. For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI',", "elementwise modulo 2; the sum will be 1 only in the slot #", "base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a", "eigenstates of the Pauli in question. eigenvalues = {} for basis_state in meas_results.keys():", "of +1 and -1 outcomes, i.e. 
0 and 1 num_0_outcomes = sum([meas_results[key] for", "integer, then we will construct the generators on N qubits where the x_loc", "the same length.\") if any([x != \"0\" and x != \"1\" for x", "2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in", "inputs {s1}, {s2} is not a valid binary string.\") # Sum the strings", "for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1) # Count", "in range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and {s2} are the", "def gray_code(N): \"\"\" Generate a Gray code for traversing the N qubit states.", "are not ordered in a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take", "get_pauli_matrix(pauli): \"\"\" Take a Pauli string and compute its matrix representation. Parameters: pauli", "into individual Paulis for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the", "{N}-qubit Pauli.\") base_generators = [list(\"I\" * idx + \"Z\" + \"I\" * (N", "n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if any([op not in ['I', 'X', 'Y',", "I, X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def", "for x in s2]): raise ValueError(f\"One of inputs {s1}, {s2} is not a", "+1 or -1 # eigenstates of the Pauli in question. eigenvalues = {}", "eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if", "x_loc is set to an integer, then we will construct the generators on", "consist only of I, X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx", "pauli_list = list(pauli) if any([op not in ['I', 'X', 'Y', 'Z'] for op", "\"0\" and x != \"1\" for x in s2]): raise ValueError(f\"One of inputs", "want to compute, e.g. 
\"ZZIZZ\" meas_results (Dict): A dictionary containing the results of", "of a given Pauli based on the measurement outcomes observed in result. Parameters:", "else: if x_loc < 0 or x_loc > N: raise ValueError(f\"Invalid placement ({x_loc})", "bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1) # Count number", "we will construct the generators on N qubits where the x_loc qubit is", "string and compute its matrix representation. Parameters: pauli (string): A string indicating the", "\"1\"] else: sub_code = gray_code(N-1) return [\"0\" + x for x in sub_code]", "will construct the generators on N qubits where the x_loc qubit is set", "- idx - 2)) for idx in range(N - 1)] # If we", "I, X, Y, or Z.\") # Determine whether the computational basis states in", "(2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0 and P1 with 0.5", "string must consist only of I, X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx]", "openfermion QubitOperator # Copy the sequence before making replacements substitution_seq = seq if", "terms of Paulis # return an openfermion QubitOperator # Copy the sequence before", "making replacements substitution_seq = seq if len(seq) <= 0: raise ValueError(f\"Cannot expand empty", "computational basis states in meas_results are +1 or -1 # eigenstates of the", "import QubitOperator from itertools import product from functools import reduce mats = {\"I\"", "import reduce mats = {\"I\" : np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])),", "[\"0\" + x for x in sub_code] + [\"1\" + x for x", "= [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))] if string_sums.count(1) ==", "= [] # Expand out the term into individual Paulis for pauli in", "qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term in qubit_operators:", "right here. 
\"\"\" pauli_list = list(pauli) if any([op not in ['I', 'X', 'Y',", "!= \"1\" for x in s2]): raise ValueError(f\"One of inputs {s1}, {s2} is", "Construct a list of strings of Pauli generators on N qubits. If x_loc", "2)) for idx in range(N - 1)] # If we have two qubits,", "is not int: raise ValueError(\"Input for gray code construction must be a positive", "int: raise ValueError(\"Input for gray code construction must be a positive integer.\") if", "x for x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\" For two adjacent elements", "string_sums.index(1) def expand_projector_sequence(seq): # Take a list of projectors, e.g. [\"P0\", \"P1\", \"X\"]", "= seq if len(seq) <= 0: raise ValueError(f\"Cannot expand empty projector sequence.\") if", "x_loc > N: raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators =", "36}, etc. Returns: The expectation value of pauli. \"\"\" pauli_list = list(pauli) n_qubits", "string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and {s2} are the same.\") elif string_sums.count(1)", "and compute its matrix representation. Parameters: pauli (string): A string indicating the Pauli", "!= \"0\" and x != \"1\" for x in s2]): raise ValueError(f\"One of", "sign*prefactor)) full_operator = QubitOperator() for term in qubit_operators: full_operator += term return full_operator", "reduce(lambda x, y: x*y, num_z_and_1) # Count number of +1 and -1 outcomes,", "'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N < 1: raise", "want to compute, e.g. \"ZZIZZ\". Tensor products are computed from left to right", "a list of strings of Pauli generators on N qubits. If x_loc is", "[\"P0\", \"P1\", \"X\"] and expand it in terms of Paulis # return an", "results of an experiment run on qiskit. 
The key value pairs are computational", "for x in seq]): raise ValueError(f\"Sequence {seq} contains elements that are not Paulis", "with their qubit indices qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx]", "list of projectors, e.g. [\"P0\", \"P1\", \"X\"] and expand it in terms of", "return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and", "First, replace P0 and P1 with 0.5 (1 +- Z) for item_idx in", "or -1 # eigenstates of the Pauli in question. eigenvalues = {} for", "add I to the generator list if N == 2: base_generators.append([\"I\"]) for idx", "sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for", "mats.keys() for x in seq]): raise ValueError(f\"Sequence {seq} contains elements that are not", "[(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))] if string_sums.count(1) == 0:", "1]) num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == -1]) return", "N qubit states. \"\"\" if N <= 0 or type(N) is not int:", "0]])), \"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1, 0], [0, 0]])),", "[\"0\", \"1\"] else: sub_code = gray_code(N-1) return [\"0\" + x for x in", "+ \"I\" * (N - idx - 1) for idx in range(N)] else:", "np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\" Generate a Gray code for traversing", "and x != \"1\" for x in s2]): raise ValueError(f\"One of inputs {s1},", "must be a positive integer.\") if N == 1: # Base case return", "return [\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli string", "elements in a gray code, determine which bit is the one that was", "observed in result. Parameters: pauli (string): A string indicating the Pauli whose expectation", "N <= 0 or type(N) is not int: raise ValueError(\"Input for gray code", "outcomes, i.e. 
0 and 1 num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if", "len(seq) <= 0: raise ValueError(f\"Cannot expand empty projector sequence.\") if any([x not in", "= gray_code(N-1) return [\"0\" + x for x in sub_code] + [\"1\" +", "raise ValueError(f\"Sequence {seq} contains elements that are not Paulis or P0/P1 projectors.\") prefactor", "ValueError(f\"Strings {s1} and {s2} are not ordered in a gray code.\") return string_sums.index(1)", "qiskit. The key value pairs are computational basis states and number of times", "\"\"\" if N < 1: raise ValueError(\"Number of Paulis must be >= 1", "case return [\"0\", \"1\"] else: sub_code = gray_code(N-1) return [\"0\" + x for", "e.g. \"ZZIZZ\" meas_results (Dict): A dictionary containing the results of an experiment run", "qubits, need to add I to the generator list if N == 2:", "Compute and return the expectation value of a given Pauli based on the", "{\"I\" : np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0, -1j],", "== 1]) num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == -1])", "representation. Parameters: pauli (string): A string indicating the Pauli whose expectation value we", "pauli_string.count(\"m\") pauli_string = pauli_string.replace(\"m\", \"\") # Remove identities and label Paulis with their", "a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take a list of projectors,", "projector sequence.\") if any([x not in mats.keys() for x in seq]): raise ValueError(f\"Sequence", "sequence before making replacements substitution_seq = seq if len(seq) <= 0: raise ValueError(f\"Cannot", "np from openfermion.ops import QubitOperator from itertools import product from functools import reduce", "'1000' : 36}, etc. Returns: The expectation value of pauli. \"\"\" pauli_list =", "= ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N < 1: raise ValueError(\"Number of Paulis", "expectation value of pauli. 
\"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots =", "1]]))} def gray_code(N): \"\"\" Generate a Gray code for traversing the N qubit", "of inputs {s1}, {s2} is not a valid binary string.\") # Sum the", "observed, e.g. {'1001' : 24, '1000' : 36}, etc. Returns: The expectation value", "product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the sign and remove the m indicators", "indices qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string", "1: # Base case return [\"0\", \"1\"] else: sub_code = gray_code(N-1) return [\"0\"", "in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli): \"\"\"", "a Pauli string and compute its matrix representation. Parameters: pauli (string): A string", "for key in eigenvalues.keys() if eigenvalues[key] == -1]) return (num_0_outcomes - num_1_outcomes) /", "product from functools import reduce mats = {\"I\" : np.eye(2), \"X\" : np.array(([[0,", "== 1: # Base case return [\"0\", \"1\"] else: sub_code = gray_code(N-1) return", "\"P1\", \"X\"] and expand it in terms of Paulis # return an openfermion", "the term into individual Paulis for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) #", "set to an integer, then we will construct the generators on N qubits", "\"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = [] # Expand out the term", "If x_loc is set to an integer, then we will construct the generators", "determine which bit is the one that was flipped. \"\"\" if len(s1) ==", "the measurement outcomes observed in result. Parameters: pauli (string): A string indicating the", "traversing the N qubit states. \"\"\" if N <= 0 or type(N) is", "string indicating the Pauli whose expectation value we want to compute, e.g. 
\"ZZIZZ\".", "len(s1) == 0 or len(s2) == 0: raise ValueError(\"Empty string inputted.\") if len(s1)", "len(s2) == 0: raise ValueError(\"Empty string inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings", "and number of times that state was observed, e.g. {'1001' : 24, '1000'", "Extract the sign and remove the m indicators sign = (-1) ** pauli_string.count(\"m\")", "!= 'I') else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y:", "x for x in sub_code] + [\"1\" + x for x in sub_code[::-1]]", "pauli_list]): raise ValueError(\"Pauli string must consist only of I, X, Y, or Z.\")", "1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1) #", "If we have two qubits, need to add I to the generator list", "ValueError(f\"One of inputs {s1}, {s2} is not a valid binary string.\") # Sum", "in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator", "in seq]): raise ValueError(f\"Sequence {seq} contains elements that are not Paulis or P0/P1", "pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\"", "= sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == -1]) return (num_0_outcomes -", "key value pairs are computational basis states and number of times that state", "a Gray code for traversing the N qubit states. \"\"\" if N <=", ": np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))} def gray_code(N):", "expand it in terms of Paulis # return an openfermion QubitOperator # Copy", "return an openfermion QubitOperator # Copy the sequence before making replacements substitution_seq =", "y: x*y, num_z_and_1) # Count number of +1 and -1 outcomes, i.e. 
0", "for basis_state in meas_results.keys(): num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx]", "+= term return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a list of strings", "not Paulis or P0/P1 projectors.\") prefactor = 1 / (2 ** (substitution_seq.count(\"P0\") +", "prefactor = 1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0", "and the remaining qubits contain the generators of N - 1 qubits. For", "for traversing the N qubit states. \"\"\" if N <= 0 or type(N)", "any([x not in mats.keys() for x in seq]): raise ValueError(f\"Sequence {seq} contains elements", "len(s2): raise ValueError(\"Strings compared in gray code must have the same length.\") if", "# Determine whether the computational basis states in meas_results are +1 or -1", "Parameters: pauli (string): A string indicating the Pauli whose expectation value we want", "then we will construct the generators on N qubits where the x_loc qubit", ": np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\" :", "if N == 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen)", "ValueError(\"Pauli string must consist only of I, X, Y, or Z.\") # Determine", "raise ValueError(f\"Strings {s1} and {s2} are the same.\") elif string_sums.count(1) > 1: raise", "to X and the remaining qubits contain the generators of N - 1", "the strings elementwise modulo 2; the sum will be 1 only in the", "raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\") base_generators = [list(\"I\" * idx", "+ \"Z\" + \"I\" * (N - idx - 2)) for idx in", "I to the generator list if N == 2: base_generators.append([\"I\"]) for idx in", "[0, 1]]))} def gray_code(N): \"\"\" Generate a Gray code for traversing the N", "for pauli in product(*substitution_seq): pauli_string = \"\".join(pauli) # Extract the sign and remove", "construct 
generators.\") if x_loc is None: return [\"I\" * idx + \"Z\" +", "on qiskit. The key value pairs are computational basis states and number of", "in mats.keys() for x in seq]): raise ValueError(f\"Sequence {seq} contains elements that are", "np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])),", "0: raise ValueError(f\"Strings {s1} and {s2} are the same.\") elif string_sums.count(1) > 1:", "of an experiment run on qiskit. The key value pairs are computational basis", "the generators on N qubits where the x_loc qubit is set to X", "are computed from left to right here. \"\"\" pauli_list = list(pauli) if any([op", "\"\"\" Construct a list of strings of Pauli generators on N qubits. If", "Pauli generators on N qubits. If x_loc is set to an integer, then", "where the x_loc qubit is set to X and the remaining qubits contain", "\"0\" and x != \"1\" for x in s1]) or any([x != \"0\"", "on N qubits. If x_loc is set to an integer, then we will", "Pauli in question. eigenvalues = {} for basis_state in meas_results.keys(): num_z_and_1 = [-1", "was observed, e.g. {'1001' : 24, '1000' : 36}, etc. Returns: The expectation", "in a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take a list of", "s2]): raise ValueError(f\"One of inputs {s1}, {s2} is not a valid binary string.\")", "the one that was flipped. \"\"\" if len(s1) == 0 or len(s2) ==", "it in terms of Paulis # return an openfermion QubitOperator # Copy the", "generator list if N == 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\")", "type(N) is not int: raise ValueError(\"Input for gray code construction must be a", ">= 1 to construct generators.\") if x_loc is None: return [\"I\" * idx", "outcomes observed in result. 
Parameters: pauli (string): A string indicating the Pauli whose", "if N < 1: raise ValueError(\"Number of Paulis must be >= 1 to", "list if N == 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return", "value of pauli. \"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values())", "\"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\"", "value pairs are computational basis states and number of times that state was", "substitution_seq.count(\"P1\"))) # First, replace P0 and P1 with 0.5 (1 +- Z) for", "+ \"Z\" + \"I\" * (N - idx - 1) for idx in", "item_idx in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx]", "= ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if", "+ \"I\" * (N - idx - 2)) for idx in range(N -", "meas_results (Dict): A dictionary containing the results of an experiment run on qiskit.", "== '1' and pauli_list[bit_idx] != 'I') else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state]", "= reduce(lambda x, y: x*y, num_z_and_1) # Count number of +1 and -1", "\"\"\" pauli_list = list(pauli) if any([op not in ['I', 'X', 'Y', 'Z'] for", "N < 1: raise ValueError(\"Number of Paulis must be >= 1 to construct", "idx in range(N - 1)] # If we have two qubits, need to", "must consist only of I, X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for", "sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == -1]) return (num_0_outcomes - num_1_outcomes)", "if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx]", "state was observed, e.g. {'1001' : 24, '1000' : 36}, etc. Returns: The", "N == 1: # Base case return [\"0\", \"1\"] else: sub_code = gray_code(N-1)", "which bit is the one that was flipped. 
\"\"\" if len(s1) == 0", "not ordered in a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take a", "code.\") return string_sums.index(1) def expand_projector_sequence(seq): # Take a list of projectors, e.g. [\"P0\",", "# Take a list of projectors, e.g. [\"P0\", \"P1\", \"X\"] and expand it", "f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator() for term in qubit_operators: full_operator +=", "sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the expectation value", "= list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if any([op not in ['I',", "same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and {s2} are not ordered", "pauli_generators(N, x_loc=None): \"\"\" Construct a list of strings of Pauli generators on N", "IIXZ'] \"\"\" if N < 1: raise ValueError(\"Number of Paulis must be >=", "- 1 qubits. For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2)", "replace P0 and P1 with 0.5 (1 +- Z) for item_idx in range(len(seq)):", "gray code, determine which bit is the one that was flipped. \"\"\" if", "indicating the Pauli whose expectation value we want to compute, e.g. 
\"ZZIZZ\" meas_results", "[] # Expand out the term into individual Paulis for pauli in product(*substitution_seq):", "if any([x not in mats.keys() for x in seq]): raise ValueError(f\"Sequence {seq} contains", "if any([x != \"0\" and x != \"1\" for x in s1]) or", "we have two qubits, need to add I to the generator list if", "[\"I\", \"mZ\"] qubit_operators = [] # Expand out the term into individual Paulis", "np.array(([[0, -1j], [1j, 0]])), \"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\" : np.array(([[1,", "value of a given Pauli based on the measurement outcomes observed in result.", "\"\") # Remove identities and label Paulis with their qubit indices qubit_operator_string =", "\"X\") return [\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli", "= [\"I\", \"mZ\"] qubit_operators = [] # Expand out the term into individual", "== 0 or len(s2) == 0: raise ValueError(\"Empty string inputted.\") if len(s1) !=", "that was flipped. \"\"\" if len(s1) == 0 or len(s2) == 0: raise", "\"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \"", "if x_loc is None: return [\"I\" * idx + \"Z\" + \"I\" *", "if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and {s2} are the same.\") elif", "identities and label Paulis with their qubit indices qubit_operator_string = \"\" for qubit_idx", "to the generator list if N == 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)):", "binary string.\") # Sum the strings elementwise modulo 2; the sum will be", "value we want to compute, e.g. 
\"ZZIZZ\" meas_results (Dict): A dictionary containing the", "not in mats.keys() for x in seq]): raise ValueError(f\"Sequence {seq} contains elements that", "\"P1\" : np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\" Generate a Gray code", "for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in base_generators] def", "(string): A string indicating the Pauli whose expectation value we want to compute,", "in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] ==", ": np.array(([[0, -1j], [1j, 0]])), \"Z\" : np.array(([[1, 0], [0, -1]])), \"P0\" :", "given Pauli based on the measurement outcomes observed in result. Parameters: pauli (string):", "\"\"\" if len(s1) == 0 or len(s2) == 0: raise ValueError(\"Empty string inputted.\")", "sub_code] + [\"1\" + x for x in sub_code[::-1]] def find_flipped_bit(s1, s2): \"\"\"", "in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the expectation value of", "in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1) # Count number of", "0: raise ValueError(f\"Cannot expand empty projector sequence.\") if any([x not in mats.keys() for", "are computational basis states and number of times that state was observed, e.g.", "QubitOperator from itertools import product from functools import reduce mats = {\"I\" :", "pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N < 1: raise ValueError(\"Number", "+1 and -1 outcomes, i.e. 
0 and 1 num_0_outcomes = sum([meas_results[key] for key", "sum will be 1 only in the slot # where we flipped a", ": np.eye(2), \"X\" : np.array(([[0, 1], [1, 0]])), \"Y\" : np.array(([[0, -1j], [1j,", "empty projector sequence.\") if any([x not in mats.keys() for x in seq]): raise", "1 to construct generators.\") if x_loc is None: return [\"I\" * idx +", "the sum will be 1 only in the slot # where we flipped", "idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen in base_generators] def get_pauli_matrix(pauli):", "that state was observed, e.g. {'1001' : 24, '1000' : 36}, etc. Returns:", "if N == 1: # Base case return [\"0\", \"1\"] else: sub_code =", "-1 # eigenstates of the Pauli in question. eigenvalues = {} for basis_state", "is the one that was flipped. \"\"\" if len(s1) == 0 or len(s2)", "qubit indices qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\":", "if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor)) full_operator = QubitOperator()", "states. \"\"\" if N <= 0 or type(N) is not int: raise ValueError(\"Input", "example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) = ['ZIXI', 'IZXI', IIXZ']", "key in eigenvalues.keys() if eigenvalues[key] == 1]) num_1_outcomes = sum([meas_results[key] for key in", "raise ValueError(f\"Strings {s1} and {s2} are not ordered in a gray code.\") return", "pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if any([op not in", "indicating the Pauli whose expectation value we want to compute, e.g. \"ZZIZZ\". 
Tensor", "return [\"0\", \"1\"] else: sub_code = gray_code(N-1) return [\"0\" + x for x", "x_loc qubit is set to X and the remaining qubits contain the generators", "{s2} are not ordered in a gray code.\") return string_sums.index(1) def expand_projector_sequence(seq): #", "have the same length.\") if any([x != \"0\" and x != \"1\" for", "\"1\" for x in s1]) or any([x != \"0\" and x != \"1\"", "consist only of I, X, Y, or Z.\") # Determine whether the computational", "else: sub_code = gray_code(N-1) return [\"0\" + x for x in sub_code] +", "two adjacent elements in a gray code, determine which bit is the one", "% 2 for i in range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1}", "sign and remove the m indicators sign = (-1) ** pauli_string.count(\"m\") pauli_string =", "24, '1000' : 36}, etc. Returns: The expectation value of pauli. \"\"\" pauli_list", "Y, or Z.\") # Determine whether the computational basis states in meas_results are", "Generate a Gray code for traversing the N qubit states. 
\"\"\" if N", "X and the remaining qubits contain the generators of N - 1 qubits.", "* idx + \"Z\" + \"I\" * (N - idx - 1) for", "the slot # where we flipped a bit string_sums = [(int(s1[i]) + int(s2[i]))", "1], [1, 0]])), \"Y\" : np.array(([[0, -1j], [1j, 0]])), \"Z\" : np.array(([[1, 0],", "2) = ['ZIXI', 'IZXI', IIXZ'] \"\"\" if N < 1: raise ValueError(\"Number of", "term in qubit_operators: full_operator += term return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct", "list(pauli) if any([op not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]):", "and return the expectation value of a given Pauli based on the measurement", "0 or len(s2) == 0: raise ValueError(\"Empty string inputted.\") if len(s1) != len(s2):", "a bit string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))]", "for i in range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and {s2}", "any([x != \"0\" and x != \"1\" for x in s2]): raise ValueError(f\"One", "meas_results): \"\"\" Compute and return the expectation value of a given Pauli based", "(N - idx - 2)) for idx in range(N - 1)] # If", ": np.array(([[0, 0], [0, 1]]))} def gray_code(N): \"\"\" Generate a Gray code for", "{s1}, {s2} is not a valid binary string.\") # Sum the strings elementwise", "is set to an integer, then we will construct the generators on N", "- idx - 1) for idx in range(N)] else: if x_loc < 0", "any([x != \"0\" and x != \"1\" for x in s1]) or any([x", "The key value pairs are computational basis states and number of times that", "whether the computational basis states in meas_results are +1 or -1 # eigenstates", "of Pauli generators on N qubits. 
If x_loc is set to an integer,", "x != \"1\" for x in s2]): raise ValueError(f\"One of inputs {s1}, {s2}", "x in s1]) or any([x != \"0\" and x != \"1\" for x", "ValueError(\"Strings compared in gray code must have the same length.\") if any([x !=", "0.5 (1 +- Z) for item_idx in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx]", "= len(pauli_list) n_shots = sum(meas_results.values()) if any([op not in ['I', 'X', 'Y', 'Z']", "0 or x_loc > N: raise ValueError(f\"Invalid placement ({x_loc}) X in {N}-qubit Pauli.\")", "num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else 1", "x in seq]): raise ValueError(f\"Sequence {seq} contains elements that are not Paulis or", "pauli_string = \"\".join(pauli) # Extract the sign and remove the m indicators sign", "0: raise ValueError(\"Empty string inputted.\") if len(s1) != len(s2): raise ValueError(\"Strings compared in", "== \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = [] # Expand out the", "the expectation value of a given Pauli based on the measurement outcomes observed", "!= \"0\" and x != \"1\" for x in s1]) or any([x !=", "A string indicating the Pauli whose expectation value we want to compute, e.g.", "def get_pauli_matrix(pauli): \"\"\" Take a Pauli string and compute its matrix representation. Parameters:", "of N - 1 qubits. For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ']", "return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a list of strings of Pauli", "a valid binary string.\") # Sum the strings elementwise modulo 2; the sum", "N - 1 qubits. 
For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4,", "reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list]) def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return", "0 or type(N) is not int: raise ValueError(\"Input for gray code construction must", "full_operator += term return full_operator def pauli_generators(N, x_loc=None): \"\"\" Construct a list of", "{s1} and {s2} are not ordered in a gray code.\") return string_sums.index(1) def", "'I') else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y,", "if len(s1) != len(s2): raise ValueError(\"Strings compared in gray code must have the", "with 0.5 (1 +- Z) for item_idx in range(len(seq)): if seq[item_idx] == \"P0\":", "if N <= 0 or type(N) is not int: raise ValueError(\"Input for gray", "Remove identities and label Paulis with their qubit indices qubit_operator_string = \"\" for", "range(N)] else: if x_loc < 0 or x_loc > N: raise ValueError(f\"Invalid placement", "or any([x != \"0\" and x != \"1\" for x in s2]): raise", "{s2} are the same.\") elif string_sums.count(1) > 1: raise ValueError(f\"Strings {s1} and {s2}", "the x_loc qubit is set to X and the remaining qubits contain the", "only of I, X, Y, or Z.\") return reduce(np.kron, [mats[sigma_idx] for sigma_idx in", "and -1 outcomes, i.e. 0 and 1 num_0_outcomes = sum([meas_results[key] for key in", "a gray code, determine which bit is the one that was flipped. 
\"\"\"", "length.\") if any([x != \"0\" and x != \"1\" for x in s1])", "P0 and P1 with 0.5 (1 +- Z) for item_idx in range(len(seq)): if", "\"1\" for x in s2]): raise ValueError(f\"One of inputs {s1}, {s2} is not", "* (N - idx - 1) for idx in range(N)] else: if x_loc", "adjacent elements in a gray code, determine which bit is the one that", "Paulis with their qubit indices qubit_operator_string = \"\" for qubit_idx in range(len(pauli_string)): if", "in range(N - 1)] # If we have two qubits, need to add", "expectation value we want to compute, e.g. \"ZZIZZ\" meas_results (Dict): A dictionary containing", "be a positive integer.\") if N == 1: # Base case return [\"0\",", "(Dict): A dictionary containing the results of an experiment run on qiskit. The", "in terms of Paulis # return an openfermion QubitOperator # Copy the sequence", "1 qubits. For example, pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ'] pauli_generators(4, 2) =", "len(s1) != len(s2): raise ValueError(\"Strings compared in gray code must have the same", "else 1 for bit_idx in range(n_qubits)] eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1)", "Sum the strings elementwise modulo 2; the sum will be 1 only in", "def pauli_expectation_value(pauli, meas_results): \"\"\" Compute and return the expectation value of a given", "basis states in meas_results are +1 or -1 # eigenstates of the Pauli", "1) for idx in range(N)] else: if x_loc < 0 or x_loc >", "integer.\") if N == 1: # Base case return [\"0\", \"1\"] else: sub_code", "N qubits where the x_loc qubit is set to X and the remaining", "Copy the sequence before making replacements substitution_seq = seq if len(seq) <= 0:", "Pauli whose expectation value we want to compute, e.g. \"ZZIZZ\". 
Tensor products are", "\"P0\" : np.array(([[1, 0], [0, 0]])), \"P1\" : np.array(([[0, 0], [0, 1]]))} def", "replacements substitution_seq = seq if len(seq) <= 0: raise ValueError(f\"Cannot expand empty projector", "seq[item_idx] == \"P1\": substitution_seq[item_idx] = [\"I\", \"mZ\"] qubit_operators = [] # Expand out", "\"\".join(pauli) # Extract the sign and remove the m indicators sign = (-1)", "\"X\"] and expand it in terms of Paulis # return an openfermion QubitOperator", "'IZXI', IIXZ'] \"\"\" if N < 1: raise ValueError(\"Number of Paulis must be", "\"ZZIZZ\". Tensor products are computed from left to right here. \"\"\" pauli_list =", "\"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if any([op not", "# return an openfermion QubitOperator # Copy the sequence before making replacements substitution_seq", "= \"\" for qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx}", "projectors, e.g. [\"P0\", \"P1\", \"X\"] and expand it in terms of Paulis #", "qubit_idx in range(len(pauli_string)): if pauli_string[qubit_idx] != \"I\": qubit_operator_string += f\"{pauli_string[qubit_idx]}{qubit_idx} \" qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor))", "pauli. 
\"\"\" pauli_list = list(pauli) n_qubits = len(pauli_list) n_shots = sum(meas_results.values()) if any([op", "== 2: base_generators.append([\"I\"]) for idx in range(len(base_generators)): base_generators[idx].insert(x_loc, \"X\") return [\"\".join(gen) for gen", "code construction must be a positive integer.\") if N == 1: # Base", "1 / (2 ** (substitution_seq.count(\"P0\") + substitution_seq.count(\"P1\"))) # First, replace P0 and P1", "seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\", \"Z\"] elif seq[item_idx] == \"P1\": substitution_seq[item_idx] =", "Determine whether the computational basis states in meas_results are +1 or -1 #", "+- Z) for item_idx in range(len(seq)): if seq[item_idx] == \"P0\": substitution_seq[item_idx] = [\"I\",", "number of +1 and -1 outcomes, i.e. 0 and 1 num_0_outcomes = sum([meas_results[key]", "slot # where we flipped a bit string_sums = [(int(s1[i]) + int(s2[i])) %", "i in range(len(s1))] if string_sums.count(1) == 0: raise ValueError(f\"Strings {s1} and {s2} are", "op in pauli_list]): raise ValueError(\"Pauli string must consist only of I, X, Y,", "strings elementwise modulo 2; the sum will be 1 only in the slot", "raise ValueError(f\"Cannot expand empty projector sequence.\") if any([x not in mats.keys() for x", "in base_generators] def get_pauli_matrix(pauli): \"\"\" Take a Pauli string and compute its matrix", "left to right here. \"\"\" pauli_list = list(pauli) if any([op not in ['I',", "if len(s1) == 0 or len(s2) == 0: raise ValueError(\"Empty string inputted.\") if" ]