# -*- coding: utf8 -*-
from django.db import models
import custom_fields
import datetime
#import mptt

# Create your models here.
class Message(models.Model):
    user = models.ForeignKey('User')
    time = models.DateTimeField(auto_now_add=True)
    text = models.TextField()
    # True if this message is a reply from support
    reply = models.BooleanField(default=False)
    ticket = models.ForeignKey('Ticket')
    ip = models.IPAddressField(blank=True, null=True)


class User(models.Model):
    name = models.CharField("Имя", max_length=60)
    email = models.EmailField(blank=True, null=True)
    phone = models.CharField("Внутр. телефон", max_length=30, blank=True, null=True)
    mobile = models.CharField("Корп. мобильный", max_length=30, blank=True, null=True)
    city_phone = models.CharField("Городской телефон", max_length=30, blank=True, null=True)
    sat_phone = models.CharField("Спутниковый телефон", max_length=30, blank=True, null=True)
    personal_phone = models.CharField("Личный телефон", max_length=30, blank=True, null=True)
    admin = models.BooleanField(default=False)
    login = models.CharField(max_length=16, blank=True, null=True)
    password = models.CharField(max_length=32, blank=True, null=True)
    place = models.ForeignKey('Place', blank=True, null=True)


class Device(models.Model):
    TYPE_CHOICES = (
        ('00', 'Компьютер'),
        ('10', 'Монитор'),
        ('20', 'Принтер'),
        ('30', 'МФУ'),
        ('40', 'Плоттер'),
        ('50', 'Сканер'),
        ('60', 'Сервер'),
        ('70', 'Маршрутизатор'),
        ('80', 'Модем'),
    )
    type = models.CharField(max_length=3, choices=TYPE_CHOICES)
    inv_no = models.CharField(max_length=40)
    ip = models.IPAddressField(blank=True, null=True)
    model = models.CharField(max_length=60, blank=True, null=True)
    mac = custom_fields.MACAddressField(blank=True, null=True)
    info = models.TextField(blank=True, null=True)
    place = models.ForeignKey('Place')
    hostname = models.CharField(blank=True, null=True, max_length=40)

    def type_display(self):
        for desc in self.TYPE_CHOICES:
            if desc[0] == self.type:
                return desc[1]

    def get_absolute_url(self):
        return "/place/" + str(self.place.id)


class Ticket(models.Model):
    # NEW, OPEN, CLOSED, DELETED
    STATUS_CHOICES = (
        ('00', 'Новое'),
        ('10', 'Принято'),
        ('20', 'Ожидаем ответ'),
        ('30', 'Закрыто'),
        ('40', 'Удалено'),
    )
    PRIO_CHOICES = (
        ('00', 'Крайне срочно'),
        ('10', 'Срочно'),
        ('20', 'Обычно'),
        ('30', 'Длительное'),
    )
    CATEGORY_CHOICES = (
        ('00', 'Компьютеры, локальный софт, железо'),
        ('10', 'Печать, принтеры, расходники'),
        ('20', 'Корпоративные системы (SAP,АСУД ..)'),
        ('30', 'Сетевые сервисы и оборуд., Серверы'),
        ('40', 'СКС (провода, розетки)'),
    )

    status = models.CharField("Статус", max_length=3, choices=STATUS_CHOICES)
    priority = models.CharField("Приоритет", max_length=3, choices=PRIO_CHOICES)
    category = models.CharField("Категория", max_length=3, choices=CATEGORY_CHOICES, blank=True, null=True)
    hours_limit = models.DecimalField("Лимит времени, ч.", max_digits=4, decimal_places=1, default=2)
    # Problem description: set to the text of the first message when the
    # ticket is created; it may be edited while the ticket is in progress.
    description = models.TextField("Описание проблемы")
    # Description of the solution, filled in when the ticket is closed
    resume = models.TextField("Отчёт о решении", blank=True, null=True)
    user = models.ForeignKey(User, related_name="tickets")
    admin = models.ForeignKey(User, related_name="tasks", blank=True, null=True)
    device = models.ForeignKey(Device, blank=True, null=True)
    # Creation time
    ctime = models.DateTimeField(auto_now_add=True)
    # Closing time
    closing_time = models.DateTimeField(blank=True, null=True)

    def get_short_text(self):
        return self.description[:120]

    def hours_from_now(self):
        delta = datetime.datetime.now() - self.ctime
        return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)

    def is_new(self, *args):
        value = self.status
        if args:
            value = args[0]
        return value == '00'

    def is_closed(self, *args):
        value = self.status
        if args:
            value = args[0]
        return value == '30'

    def accept_by(self, user):
        self.admin = user

    def no(self):
        return '{0:0>5}'.format(self.id)


class Place(models.Model):
    name = models.CharField(max_length=60)
    parent = models.ForeignKey('self', null=True, blank=True)
    address = models.CharField(max_length=70)
    LEVEL_DESC = (
        (1, "Населённый пункт"),
        (2, "Территория, группа зданий"),
        (3, "Здание"),
        (4, "Этаж"),
        (5, "Кабинет/помещение"),
        (6, "Место/комплекс"),
    )

    def childs(self):
        return Place.objects.filter(parent=self)

    def get_level(self):
        res = 0
        try:
            if self.parent is not None:
                o = self
                while o.parent is not None:
                    res += 1
                    o = o.parent
        except:
            pass
        return res

    def level_display(self):
        level = self.get_level()
        for desc in self.LEVEL_DESC:
            if desc[0] == level:
                return desc[1]

    def path(self):
        path = []
        o = self
        while o.parent is not None:
            path.insert(0, o)
            o = o.parent
        path.insert(0, o)
        return path

    def get_absolute_url(self):
        return '/place/' + str(self.id)

    def __unicode__(self):
        return self.name

    def users(self):
        return User.objects.filter(place=self)

#mptt.register(Place)

class Document(models.Model):
    name = models.CharField(max_length=60)
    place = models.ForeignKey(Place, blank=True, null=True)

    def latest_file(self):
        return DocFile.objects.filter(document=self).order_by('-id')[0]


class DocFile(models.Model):
    document = models.ForeignKey(Document)
    version = models.IntegerField()
    file_name = models.CharField(max_length=60)
    comment = models.CharField(max_length=90, blank=True, null=True)
    ctime = models.DateTimeField()
    user = models.ForeignKey(User)
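
# Illustrative usage sketch (not part of the original module; the field
# values below are made up) showing how the Ticket helpers above behave:
#
#     t = Ticket(status='00', description='Printer jams on every page')
#     t.is_new()           # True: status '00' means "new"
#     t.is_closed('30')    # True: a status code can also be passed explicitly
#     t.get_short_text()   # first 120 characters of the description
#     # For a saved ticket with t.id == 42, t.no() returns '00042'
#     # ('{0:0>5}' left-pads the id with zeros to five characters).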
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Common utilities for test classes."""

import contextlib
import os
import re
import unittest
import webtest

from core.domain import config_domain
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import main

import json


CSRF_REGEX = (
    r'csrf_token: JSON\.parse\(\'\\\"([A-Za-z0-9/=_-]+)\\\"\'\)')
# Prefix to append to all lines printed by tests to the console.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '


def empty_environ():
    os.environ['AUTH_DOMAIN'] = 'example.com'
    os.environ['SERVER_NAME'] = 'localhost'
    os.environ['HTTP_HOST'] = 'localhost'
    os.environ['SERVER_PORT'] = '8080'
    os.environ['USER_EMAIL'] = ''
    os.environ['USER_ID'] = ''
    os.environ['USER_IS_ADMIN'] = '0'
    os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
        os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])


class TestBase(unittest.TestCase):
    """Base class for all tests."""

    maxDiff = 2500

    DEFAULT_USERNAME = 'defaultusername'

    def setUp(self):
        raise NotImplementedError

    def tearDown(self):
        raise NotImplementedError

    def log_line(self, line):
        """Print the line with a prefix that can be identified by the
        script that calls the test.
        """
        print '%s%s' % (LOG_LINE_PREFIX, line)

    def _delete_all_models(self):
        raise NotImplementedError

    def login(self, email, is_super_admin=False):
        os.environ['USER_EMAIL'] = email
        os.environ['USER_ID'] = self.get_user_id_from_email(email)
        os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'

    def logout(self):
        os.environ['USER_EMAIL'] = ''
        os.environ['USER_ID'] = ''
        os.environ['USER_IS_ADMIN'] = '0'

    def shortDescription(self):
        """Additional information logged during unit test invocation."""
        # Suppress default logging of docstrings.
        return None

    def get_expected_login_url(self, slug):
        """Returns the expected login URL."""
        return current_user_services.create_login_url(slug)

    def get_expected_logout_url(self, slug):
        """Returns the expected logout URL."""
        return current_user_services.create_logout_url(slug)

    def _parse_json_response(self, json_response, expect_errors=False):
        """Convert a JSON server response to an object (such as a dict)."""
        if not expect_errors:
            self.assertEqual(json_response.status_int, 200)

        self.assertEqual(
            json_response.content_type, 'application/javascript')
        self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))

        return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])

    def get_json(self, url):
        """Get a JSON response, transformed to a Python object."""
        json_response = self.testapp.get(url)
        self.assertEqual(json_response.status_int, 200)
        return self._parse_json_response(json_response, expect_errors=False)

    def post_json(self, url, payload, csrf_token=None, expect_errors=False,
                  expected_status_int=200, upload_files=None):
        """Post an object to the server by JSON; return the received object."""
        data = {'payload': json.dumps(payload)}
        if csrf_token:
            data['csrf_token'] = csrf_token

        json_response = self.testapp.post(
            str(url), data, expect_errors=expect_errors,
            upload_files=upload_files)

        self.assertEqual(json_response.status_int, expected_status_int)
        return self._parse_json_response(
            json_response, expect_errors=expect_errors)

    def put_json(self, url, payload, csrf_token=None, expect_errors=False,
                 expected_status_int=200):
        """Put an object to the server by JSON; return the received object."""
        data = {'payload': json.dumps(payload)}
        if csrf_token:
            data['csrf_token'] = csrf_token

        json_response = self.testapp.put(
            str(url), data, expect_errors=expect_errors)

        self.assertEqual(json_response.status_int, expected_status_int)
        return self._parse_json_response(
            json_response, expect_errors=expect_errors)

    def get_csrf_token_from_response(self, response):
        """Retrieve the CSRF token from a GET response."""
        return re.search(CSRF_REGEX, response.body).group(1)

    def register_editor(self, email, username=None):
        """Register a user with the given username as an editor."""
        if username is None:
            username = self.DEFAULT_USERNAME

        self.login(email)

        response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
        csrf_token = self.get_csrf_token_from_response(response)

        response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
            'csrf_token': csrf_token,
            'payload': json.dumps({
                'username': username,
                'agreed_to_terms': True
            })
        })
        self.assertEqual(response.status_int, 200)

        self.logout()

    def set_admins(self, admin_emails):
        """Set the ADMIN_EMAILS property."""
        self.login('superadmin@example.com', is_super_admin=True)
        response = self.testapp.get('/admin')
        csrf_token = self.get_csrf_token_from_response(response)
        self.post_json('/adminhandler', {
            'action': 'save_config_properties',
            'new_config_property_values': {
                config_domain.ADMIN_EMAILS.name: admin_emails,
            }
        }, csrf_token)
        self.logout()

    def set_moderators(self, moderator_emails):
        """Set the MODERATOR_EMAILS property."""
        self.login('superadmin@example.com', is_super_admin=True)
        response = self.testapp.get('/admin')
        csrf_token = self.get_csrf_token_from_response(response)
        self.post_json('/adminhandler', {
            'action': 'save_config_properties',
            'new_config_property_values': {
                config_domain.MODERATOR_EMAILS.name: moderator_emails,
            }
        }, csrf_token)
        self.logout()

    def get_current_logged_in_user_id(self):
        return os.environ['USER_ID']

    def get_user_id_from_email(self, email):
        return current_user_services.get_user_id_from_email(email)

    def save_new_default_exploration(self,
                                     exploration_id, owner_id, title='A title'):
        """Saves a new default exploration written by owner_id.

        Returns the exploration domain object.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title, 'A category')
        exp_services.save_new_exploration(owner_id, exploration)
        return exploration

    def save_new_valid_exploration(
            self, exploration_id, owner_id, title='A title'):
        """Saves a new strictly-validated exploration.

        Returns the exploration domain object.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title, 'A category')
        exploration.states[exploration.init_state_name].widget.handlers[
            0].rule_specs[0].dest = feconf.END_DEST
        exploration.objective = 'An objective'
        exp_services.save_new_exploration(owner_id, exploration)
        return exploration

    @contextlib.contextmanager
    def swap(self, obj, attr, newvalue):
        """Swap an object's attribute value within the context of a
        'with' statement. The object can be anything that supports
        getattr and setattr, such as class instances, modules, ...

        Example usage:

            import math
            with self.swap(math, "sqrt", lambda x: 42):
                print math.sqrt(16.0)  # prints 42
            print math.sqrt(16.0)  # prints 4 as expected.
        """
        original = getattr(obj, attr)
        setattr(obj, attr, newvalue)
        try:
            yield
        finally:
            setattr(obj, attr, original)


class AppEngineTestBase(TestBase):
    """Base class for tests requiring App Engine services."""

    def _delete_all_models(self):
        from google.appengine.ext import ndb
        ndb.delete_multi(ndb.Query().iter(keys_only=True))

    def setUp(self):
        empty_environ()

        from google.appengine.datastore import datastore_stub_util
        from google.appengine.ext import testbed

        self.testbed = testbed.Testbed()
        self.testbed.activate()

        # Configure datastore policy to emulate instantaneously and globally
        # consistent HRD.
        policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=1)

        # Declare any relevant App Engine service stubs here.
        self.testbed.init_user_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub(consistency_policy=policy)
        self.testbed.init_taskqueue_stub()
        self.taskqueue_stub = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)
        self.testbed.init_urlfetch_stub()
        self.testbed.init_files_stub()
        self.testbed.init_blobstore_stub()

        # Set up the app to be tested.
        self.testapp = webtest.TestApp(main.app)

    def tearDown(self):
        self.logout()
        self._delete_all_models()
        self.testbed.deactivate()

    def count_jobs_in_taskqueue(self):
        return len(self.taskqueue_stub.get_filtered_tasks())

    def process_and_flush_pending_tasks(self):
        from google.appengine.ext import deferred

        tasks = self.taskqueue_stub.get_filtered_tasks()
        self.taskqueue_stub.FlushQueue('default')
        while tasks:
            for task in tasks:
                if task.url == '/_ah/queue/deferred':
                    deferred.run(task.payload)
                else:
                    # All other tasks are expected to be mapreduce ones.
                    headers = {
                        key: str(val) for key, val in task.headers.iteritems()
                    }
                    headers['Content-Length'] = str(len(task.payload or ''))
                    response = self.testapp.post(
                        url=str(task.url), params=(task.payload or ''),
                        headers=headers)
                    if response.status_code != 200:
                        raise RuntimeError(
                            'MapReduce task to URL %s failed' % task.url)

            tasks = self.taskqueue_stub.get_filtered_tasks()
            self.taskqueue_stub.FlushQueue('default')


if feconf.PLATFORM == 'gae':
    GenericTestBase = AppEngineTestBase
else:
    raise Exception('Invalid platform: expected one of [\'gae\']')
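
# Illustrative sketch (not part of Oppia itself): a concrete test case would
# subclass GenericTestBase and can rely on the helpers above, e.g. swap():
#
#     class SampleTest(GenericTestBase):
#         def test_sqrt_is_swapped(self):
#             import math
#             with self.swap(math, 'sqrt', lambda x: 42):
#                 self.assertEqual(math.sqrt(16.0), 42)
#             self.assertEqual(math.sqrt(16.0), 4.0)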
import ipaddress
import subprocess
from subprocess import PIPE

ip_net = ipaddress.ip_network('192.168.0.100/30')
for i in ip_net.hosts():
    host_add = str(i)
    toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)

    output = toping.communicate()[0]
    hostalive = toping.returncode
    if hostalive == 0:
        print(host_add, "is reachable")
    else:
        print(host_add, "is not reachable")
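
# Portability note (an illustrative addition, not in the original script):
# '-n 3' is the Windows ping flag for the echo-request count; Linux and
# macOS use '-c' instead. A minimal cross-platform variant of the loop:
import platform

count_flag = '-n' if platform.system() == 'Windows' else '-c'
for i in ip_net.hosts():
    host_add = str(i)
    pinger = subprocess.Popen(['ping', count_flag, '3', host_add], stdout=PIPE)
    pinger.communicate()  # wait for the ping process to finish
    status = 'is reachable' if pinger.returncode == 0 else 'is not reachable'
    print(host_add, status)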
import argparse
import pickle

from pyspark.sql.session import SparkSession

parser = argparse.ArgumentParser()
parser.add_argument('--rs', type=str, nargs='+')
args = parser.parse_args()

ss = SparkSession.builder.getOrCreate()

post_df = None
for f in args.rs:
    df = ss.read.json(f).select('id', 'subreddit', 'subreddit_id', 'title')
    post_df = df if post_df is None else post_df.union(df)

subreddit_ids = pickle.load(open('subreddit_ids', 'rb'))
ret = post_df.filter(post_df.subreddit_id.isin(*subreddit_ids)).coalesce(1)
ret.write.orc('RS.orc', mode='overwrite')
ret.write.json('RS.json', mode='overwrite')
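
# Usage sketch (the script and dump file names here are hypothetical):
#
#     spark-submit filter_posts.py --rs RS_2018-01.json RS_2018-02.json
#
# The script expects a pickled iterable of subreddit ids in a local file
# named 'subreddit_ids'; the filtered posts are written to RS.orc and
# RS.json, coalesced to a single output partition.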
# Your code here
d = dict()
for i in range(10):
    fave_fast_food = input("Fave fast food restaurant: ")
    if fave_fast_food in d:
        d[fave_fast_food] += 1
    else:
        d[fave_fast_food] = 1

for k, v in d.items():
    print('Fast Food Restaurants that are ' + k + ": " + str(v))

maximum = max(d, key=d.get)  # Just use 'min' instead of 'max' for minimum.
print("The fast food restaurant " + maximum + " has this many votes:", d[maximum])
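
# Illustration of the max-by-value idiom used above (sample data made up):
sample = {'tacos': 3, 'burgers': 5, 'pizza': 4}
print(max(sample, key=sample.get))  # prints 'burgers', the key with the
                                    # largest value; min() gives the smallest.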
# Get Facebook's bAbi dataset
from utils import maybe_download
from shutil import rmtree
import os
import tarfile

def get_babi_en(get_10k=False):
    data_dir = "datasets/tasks_1-20_v1-2/en/"
    if get_10k:
        data_dir = "datasets/tasks_1-20_v1-2/en-10k/"

    maybe_download('https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz', 'datasets', 11745123)
    file = tarfile.open("datasets/babi_tasks_1-20_v1-2.tar.gz", "r:gz")
    file.extractall("datasets")
    file.close()
    print("Some housekeeping...")
    if not os.path.exists("datasets/babi"):
        os.makedirs("datasets/babi")
    for path, dir, files in os.walk(data_dir):
        for file in files:
            os.rename(os.path.join(data_dir, file), os.path.join("datasets/babi", file))
    os.remove("datasets/babi_tasks_1-20_v1-2.tar.gz")
    rmtree("datasets/tasks_1-20_v1-2")
    print("Finished.")
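
# Usage sketch: the helper above downloads the archive and flattens the
# chosen English split into datasets/babi/ (maybe_download is the project's
# own utility imported at the top). Calling it would actually download:
#
#     get_babi_en()              # 1k-example English tasks
#     get_babi_en(get_10k=True)  # 10k-example English tasks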
flexible
{ "blob_id": "7a4d04bd60b5f5555982af372145f9f4bcd83ca2", "index": 8194, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef get_babi_en(get_10k=False):\n data_dir = 'datasets/tasks_1-20_v1-2/en/'\n if get_10k == True:\n data_dir = 'datasets/tasks_1-20_v1-2/en-10k/'\n maybe_download(\n 'https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz',\n 'datasets', 11745123)\n file = tarfile.open('datasets/babi_tasks_1-20_v1-2.tar.gz', 'r:gz')\n file.extractall('datasets')\n file.close()\n print('Some housekeeping...')\n if not os.path.exists('datasets/babi'):\n os.makedirs('datasets/babi')\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\n 'datasets/babi', file))\n os.remove('datasets/babi_tasks_1-20_v1-2.tar.gz')\n rmtree('datasets/tasks_1-20_v1-2')\n print('Finished.')\n", "step-3": "from utils import maybe_download\nfrom shutil import rmtree\nimport os\nimport tarfile\n\n\ndef get_babi_en(get_10k=False):\n data_dir = 'datasets/tasks_1-20_v1-2/en/'\n if get_10k == True:\n data_dir = 'datasets/tasks_1-20_v1-2/en-10k/'\n maybe_download(\n 'https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz',\n 'datasets', 11745123)\n file = tarfile.open('datasets/babi_tasks_1-20_v1-2.tar.gz', 'r:gz')\n file.extractall('datasets')\n file.close()\n print('Some housekeeping...')\n if not os.path.exists('datasets/babi'):\n os.makedirs('datasets/babi')\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\n 'datasets/babi', file))\n os.remove('datasets/babi_tasks_1-20_v1-2.tar.gz')\n rmtree('datasets/tasks_1-20_v1-2')\n print('Finished.')\n", "step-4": "# Get Facebook's bAbi dataset\nfrom utils import maybe_download\nfrom shutil import rmtree\nimport os\nimport tarfile\n\ndef get_babi_en(get_10k=False):\n data_dir = \"datasets/tasks_1-20_v1-2/en/\"\n if get_10k == True:\n data_dir = \"datasets/tasks_1-20_v1-2/en-10k/\"\n \n maybe_download('https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz', 'datasets', 11745123)\n file = tarfile.open(\"datasets/babi_tasks_1-20_v1-2.tar.gz\", \"r:gz\")\n file.extractall(\"datasets\")\n file.close()\n print(\"Some housekeeping...\")\n if not os.path.exists(\"datasets/babi\"):\n os.makedirs(\"datasets/babi\")\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\"datasets/babi\", file)) \n os.remove(\"datasets/babi_tasks_1-20_v1-2.tar.gz\")\n rmtree(\"datasets/tasks_1-20_v1-2\")\n print(\"Finished.\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> def calcula_norma(x): lista = [] for e in x: lista.append(e ** 2) v = sum(lista) ** (1 / 2) return v <|reserved_special_token_1|> def calcula_norma(x): lista=[] for e in x: lista.append(e**2) v=(sum(lista)**(1/2)) return v
flexible
{ "blob_id": "7346992d69250240207a0fc981d0adc245e69f87", "index": 5206, "step-1": "<mask token>\n", "step-2": "def calcula_norma(x):\n lista = []\n for e in x:\n lista.append(e ** 2)\n v = sum(lista) ** (1 / 2)\n return v\n", "step-3": "def calcula_norma(x):\n lista=[]\n for e in x:\n lista.append(e**2)\n v=(sum(lista)**(1/2))\n return v ", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import keras
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
import matplotlib.pyplot as plt
from keras.callbacks import History
import numpy as np
import os
import cPickle as pickle
import scipy
from scipy import spatial

def getModel( output_dim ):
    '''
    * output_dim: the number of classes (int)

    * return: compiled model (keras.engine.training.Model)
    '''
    vgg_model = VGG16( weights='imagenet', include_top=True )
    vgg_out = vgg_model.layers[-2].output #Last FC layer's output

    #Create softmax layer taking input as vgg_out
    regression_layer = keras.layers.core.Dense(output_dim,
                                               init='lecun_uniform')(vgg_out)

    #Create new transfer learning model
    tl_model = Model( input=vgg_model.input, output=regression_layer)

    #Freeze all layers of VGG16 and Compile the model
    for layers in vgg_model.layers:
        layers.trainable = False

    tl_model.compile(optimizer='Nadam',
                     loss='cosine_proximity')

    #Confirm the model is appropriate
    tl_model.summary()

    return tl_model

if __name__ == '__main__':
    #Output dim for your dataset
    output_dim = 300 #For word2vec output

    # Training parameters
    batchSize = 100
    numEpochs = 15

    tl_model = getModel( output_dim )
    trainClass = np.load('caltech250TrainClass.npy')
    trainLabel = np.load('caltech250TrainWordvec.npy')
    valData = np.load('caltech250ValData.npy')
    valClass = np.load('caltech250ValClass.npy')
    valLabel = np.load('caltech250ValWordvec.npy')

    # Input data generator
    train_datagen = ImageDataGenerator(
        featurewise_center = True)

    train_generator = train_datagen.flow_from_directory(
        'resizedCaltech250Train',
        target_size= (224,224),
        class_mode = 'sparse',
        batch_size = batchSize)

    test_datagen = ImageDataGenerator(
        featurewise_center = True)

    test_generator = test_datagen.flow_from_directory(
        'resizedCaltech250Val',
        target_size=(224,224),
        class_mode = 'sparse',
        batch_size = batchSize)

    train_datagen.fit(valData)
    test_datagen.fit(valData)

    caltechDict = pickle.load(open('caltech256Dict.pkl'))

    epoch = 0
    numImg = 0
    numImgsPerEpoch = 23954
    classLabels = os.listdir('./resizedCaltech250Train/')
    classTargets = [caltechDict[key] for key in caltechDict]
    classLabels = [key for key in caltechDict]

    ind = np.argsort(classLabels)
    classLabels.sort()
    classTargets = [classTargets[i] for i in ind]

    print("Epoch 0")
    batchCount = 0
    for batch in train_generator:
        imgs = batch[0]
        labels = batch[1]

        wordEmbed = [np.asarray(caltechDict[classLabels[classInd]]) for classInd in labels]
        tl_model.train_on_batch(imgs,np.asarray(wordEmbed))
        #print(batchCount)
        numImg += batchSize
        batchCount += 1
        # Epoch checkpoint
        if numImg > numImgsPerEpoch:
            tl_model.save('test.hd5')
            epoch += 1
            numImg = 0
            print('Epoch: ' + str(epoch))

            # Calculate validation loss after each epoch
            loss = 0
            imgCount = 0
            numbatch = 1
            print("Calculating validation loss")

            # Get validation set labels from cosine similarity
            predictions = tl_model.predict(valData, batch_size = 100, verbose = 1)
            #print('Validation Loss: ' + str(spatial.distance.cosine(predictions,valLabel)))
            valLoss = [spatial.distance.cosine(predictions[j,:],valLabel[j,:]) for j in range(valClass.shape[0])]
            print('Validation Loss: ' + str(sum(valLoss)/valClass.shape[0]))
            dist = [[spatial.distance.cosine(x,y) for y in classTargets] for x in predictions]
            ind = np.argmin(dist,1)
            predictedLabel = [classLabels[x] for x in ind]

            # Calculate error rate
            correct = 0
            for i,label in enumerate(valClass):
                if valClass[i] == predictedLabel[i]:
                    correct += 1

            print('Validation Accuracy: ' + str(float(correct)/valClass.shape[0]))

            print("Calculating Training Accuracy and Loss")
            t_preds = np.array([])
            t_targets = np.array([])
            t_numImg = 0
            t_batch_count = 1
            for t_batch in train_generator:

                #print(' train batch ' + str(t_batch_count))
                t_numImg += batchSize
                t_imgs = t_batch[0]

                t_batch_preds = tl_model.predict_on_batch(t_imgs)
                t_batch_targets = t_batch[1]

                #print(classLabels[t_batch_targets[0]])

                t_preds = np.vstack([t_preds, t_batch_preds]) if t_preds.size else t_batch_preds
                t_targets = np.hstack([t_targets, t_batch_targets]) if t_targets.size else t_batch_targets
                #print(' size ' + str(np.shape(t_targets)))

                t_batch_count += 1
                if t_numImg >= numImgsPerEpoch:
                    break

            dist = [[spatial.distance.cosine(x,y) for y in classTargets] for x in t_preds]
            ind = np.argmin(dist,1)
            predictedLabel = [classLabels[x] for x in ind]

            # Calculate Training Accuracy
            correct = 0
            #for i,label in enumerate(trainClass):
            for i in range(numImgsPerEpoch):
                #if trainClass[i] == predictedLabel[i]:
                if classLabels[t_targets[i]] == predictedLabel[i]:
                    correct += 1
            print('Training Accuracy: ' + str(float(correct)/trainClass.shape[0]))

            trainLoss = [spatial.distance.cosine(t_preds[sample,:],trainLabel[sample,:]) for sample in range(trainClass.shape[0])]
            print('Training Loss: ' + str(sum(trainLoss)/trainClass.shape[0]))
            '''
            for valBatch in test_generator:
                imgs = valBatch[0]
                labels = valBatch[1]
                wordEmbed = [np.asarray(caltechDict[classLabels[classInd]]) for classInd in labels]
                loss += np.sum(tl_model.test_on_batch(imgs,np.asarray(wordEmbed)))
                imgCount += batchSize
                if imgCount > 1:
                    print("Validation: " + str(loss/imgCount))
                    loss = 0
                    imgCount = 0
                    break
            '''
            if epoch >= numEpochs:
                break

    #Test the model
    '''
    plt.plot(history.history['acc'])
    plt.show()

    plt.figure()
    plt.plot(history.history['loss'])
    plt.show()

    plt.figure()
    plt.plot(history.history['val_acc'])
    plt.show()

    plt.figure()
    plt.plot(history.history['val_loss'])
    plt.show()

    epoch = 0
    numImg = 0
    classLabels = os.listdir('C:/Users/xsaardo/Desktop/Caltech97Train/')

    for batch in train_generator:
        imgs = batch[0]
        labels = batch[1]

        print(batch[0].shape)
        print(classLabels[np.argmax(batch[1][0,:])])
        img = np.reshape(batch[0][0,:,:,:],(224,224,3)).astype('uint8')
        print(img.shape)
        plt.imshow(img)
        plt.show()
        break;

        numImg += batchSize
        if numImg > numImgsPerEpoch:
            epoch += 1
            if epoch > numEpochs:
                break
    '''
normal
{ "blob_id": "461b2de86907047df53c3857c6b0397e77de3fcd", "index": 5139, "step-1": "import keras\r\nfrom keras.applications import VGG16\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Model\r\nimport matplotlib.pyplot as plt\r\nfrom keras.callbacks import History\r\nimport numpy as np\r\nimport os\r\nimport cPickle as pickle\r\nimport scipy\r\nfrom scipy import spatial\r\n\r\ndef getModel( output_dim ):\r\n ''' \r\n * output_dim: the number of classes (int)\r\n \r\n * return: compiled model (keras.engine.training.Model)\r\n '''\r\n vgg_model = VGG16( weights='imagenet', include_top=True )\r\n vgg_out = vgg_model.layers[-2].output #Last FC layer's output \r\n \r\n #Create softmax layer taking input as vgg_out\r\n regression_layer = keras.layers.core.Dense(output_dim,\r\n init='lecun_uniform')(vgg_out)\r\n \r\n #Create new transfer learning model\r\n tl_model = Model( input=vgg_model.input, output=regression_layer)\r\n \r\n #Freeze all layers of VGG16 and Compile the model\r\n for layers in vgg_model.layers:\r\n layers.trainable = False;\r\n \r\n tl_model.compile(optimizer='Nadam',\r\n loss='cosine_proximity')\r\n \r\n #Confirm the model is appropriate\r\n tl_model.summary()\r\n\r\n return tl_model\r\n\r\nif __name__ == '__main__':\r\n #Output dim for your dataset\r\n output_dim = 300 #For word2vec output \r\n \r\n # Training parameters\r\n batchSize = 100 \r\n numEpochs = 15\r\n \r\n tl_model = getModel( output_dim ) \r\n trainClass = np.load('caltech250TrainClass.npy')\r\n trainLabel = np.load('caltech250TrainWordvec.npy') \r\n valData = np.load('caltech250ValData.npy')\r\n valClass = np.load('caltech250ValClass.npy')\r\n valLabel = np.load('caltech250ValWordvec.npy')\r\n\r\n # Input data generator\r\n train_datagen = ImageDataGenerator(\r\n\tfeaturewise_center = True)\r\n \r\n train_generator = train_datagen.flow_from_directory(\r\n 'resizedCaltech250Train',\r\n target_size= (224,224),\r\n class_mode = 'sparse',\r\n batch_size = batchSize)\r\n \r\n test_datagen = ImageDataGenerator(\r\n\tfeaturewise_center = True)\r\n \r\n test_generator = test_datagen.flow_from_directory(\r\n 'resizedCaltech250Val',\r\n target_size=(224,224),\r\n class_mode = 'sparse',\r\n batch_size = batchSize)\r\n\r\n train_datagen.fit(valData)\r\n test_datagen.fit(valData)\r\n \r\n caltechDict = pickle.load(open('caltech256Dict.pkl'))\r\n\r\n epoch = 0\r\n numImg = 0\r\n numImgsPerEpoch = 23954\r\n classLabels = os.listdir('./resizedCaltech250Train/')\r\n classTargets = [caltechDict[key] for key in caltechDict]\r\n classLabels = [key for key in caltechDict] \r\n\r\n ind = np.argsort(classLabels)\r\n classLabels.sort()\r\n classTargets = [classTargets[i] for i in ind]\r\n\r\n print(\"Epoch 0\")\r\n batchCount = 0\t \r\n for batch in train_generator:\r\n imgs = batch[0]\r\n labels = batch[1]\r\n\r\n wordEmbed = [np.asarray(caltechDict[classLabels[classInd]]) for classInd in labels]\r\n tl_model.train_on_batch(imgs,np.asarray(wordEmbed))\r\n\t#print(batchCount)\r\n numImg += batchSize\r\n batchCount += 1\r\n # Epoch checkpoint\r\n if numImg > numImgsPerEpoch:\r\n\t tl_model.save('test.hd5')\r\n epoch += 1\r\n numImg = 0\r\n print('Epoch: ' + str(epoch))\r\n \r\n # Calculate validation loss after each epoch\r\n loss = 0\r\n imgCount = 0\r\n numbatch = 1\r\n\t print(\"Calculating validation loss\")\r\n\r\n\r\n\t # Get validation set labels from cosine similarity\r\n\t predictions = tl_model.predict(valData, batch_size = 100, verbose = 1)\r\n\t #print('Validation Loss: ' + 
str(spatial.distance.cosine(predictions,valLabel)))\r\n\t valLoss = [spatial.distance.cosine(predictions[j,:],valLabel[j,:]) for j in range(valClass.shape[0])]\r\n\t print('Validation Loss: ' + str(sum(valLoss)/valClass.shape[0]))\r\n\t dist = [[spatial.distance.cosine(x,y) for y in classTargets] for x in predictions]\r\n\t ind = np.argmin(dist,1)\r\n\t predictedLabel = [classLabels[x] for x in ind]\r\n\r\n\t # Calculate error rate\r\n\t correct = 0\r\n\t for i,label in enumerate(valClass):\r\n\t\tif valClass[i] == predictedLabel[i]:\r\n\t\t correct += 1\r\n\r\n\t print('Validation Accuracy: ' + str(float(correct)/valClass.shape[0]))\r\n\r\n\t print(\"Calculating Training Accuracy and Loss\")\r\n t_preds = np.array([])\r\n t_targets = np.array([])\r\n t_numImg = 0\r\n t_batch_count = 1\r\n for t_batch in train_generator:\r\n\r\n #print(' train batch ' + str(t_batch_count))\r\n t_numImg += batchSize\r\n t_imgs = t_batch[0]\r\n\r\n t_batch_preds = tl_model.predict_on_batch(t_imgs)\r\n t_batch_targets = t_batch[1]\r\n\r\n #print(classLabels[t_batch_targets[0]])\r\n\r\n t_preds = np.vstack([t_preds, t_batch_preds]) if t_preds.size else t_batch_preds\r\n t_targets = np.hstack([t_targets, t_batch_targets]) if t_targets.size else t_batch_targets\r\n #print(' size ' + str(np.shape(t_targets)))\r\n\r\n t_batch_count += 1\r\n if t_numImg >= numImgsPerEpoch:\r\n break\r\n\r\n dist = [[spatial.distance.cosine(x,y) for y in classTargets] for x in t_preds]\r\n ind = np.argmin(dist,1)\r\n predictedLabel = [classLabels[x] for x in ind]\r\n\r\n # Calculate Training Accuracy\r\n correct = 0\r\n #for i,label in enumerate(trainClass):\r\n for i in range(numImgsPerEpoch):\r\n #if trainClass[i] == predictedLabel[i]:\r\n if classLabels[t_targets[i]] == predictedLabel[i]:\r\n correct += 1\r\n print('Training Accuracy: ' + str(float(correct)/trainClass.shape[0]))\r\n\t \r\n\t trainLoss = [spatial.distance.cosine(t_preds[sample,:],trainLabel[sample,:]) for sample in range(trainClass.shape[0])]\r\n\t print('Training Loss: ' + str(sum(trainLoss)/trainClass.shape[0]))\r\n\t '''\r\n for valBatch in test_generator:\r\n imgs = valBatch[0]\r\n labels = valBatch[1]\r\n wordEmbed = [np.asarray(caltechDict[classLabels[classInd]]) for classInd in labels]\r\n loss += np.sum(tl_model.test_on_batch(imgs,np.asarray(wordEmbed)))\r\n imgCount += batchSize\r\n if imgCount > 1:\r\n print(\"Validation: \" + str(loss/imgCount))\r\n\t\t loss = 0\r\n\t\t imgCount = 0\r\n break\r\n '''\r\n if epoch >= numEpochs:\r\n break\r\n \r\n \r\n #Test the model\r\n '''\r\n plt.plot(history.history['acc'])\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(history.history['loss'])\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(history.history['val_acc'])\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(history.history['val_loss'])\r\n plt.show()\r\n \r\n epoch = 0\r\n numImg = 0\r\n classLabels = os.listdir('C:/Users/xsaardo/Desktop/Caltech97Train/')\r\n \r\n for batch in train_generator:\r\n imgs = batch[0]\r\n labels = batch[1]\r\n \r\n \r\n \r\n print(batch[0].shape)\r\n print(classLabels[np.argmax(batch[1][0,:])])\r\n img = np.reshape(batch[0][0,:,:,:],(224,224,3)).astype('uint8')\r\n print(img.shape)\r\n plt.imshow(img)\r\n plt.show()\r\n break;\r\n \r\n numImg += batchSize\r\n if numImg > numImgsPerEpoch:\r\n epoch += 1\r\n if epoch > numEpochs:\r\n break\r\n \r\n \r\n '''\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.applications import InceptionV3
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.layers import Flatten,Dense,Dropout
from keras.preprocessing.image import img_to_array
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import os
#######################################################################################################################
#######################################################################################################################
# PARAMETERS TO CHANGE!!!
path=r"C:\Users\Dmitry\Desktop\CNN_Research_2018\cats_vs_dogs"
image_size=(150, 150) # set the input image size here
batch_size=32 # choose from these values [8,16,32]
batch_size2=100
inception_nontrainable_layers_count=205 # number of InceptionV3 layers whose weights stay frozen during transfer learning
nb_epoch=1 # number of epochs for training the network
fc_nb_epoch=10 # number of epochs for training the classification head
n_classes=2 # number of classes to train on
train_path=os.path.join(path,"test") # FIXME train!!!
validation_path=os.path.join(path,'validation')
test_path=os.path.join(path,'test')
path_to_save_np=path # path for saving numpy arrays; CHANGE TO YOUR OWN PATH WITH ROOM FOR 10 GB!!!
########################################################################################################################
########################################################################################################################
inc_model=InceptionV3(include_top=False,
                      weights='imagenet',
                      input_shape=((3, image_size[0], image_size[1])))
bottleneck_datagen = ImageDataGenerator(rescale=1. / 255) # the generator itself
train_generator = bottleneck_datagen.flow_from_directory(train_path,
                                                         target_size=image_size,
                                                         batch_size=batch_size,
                                                         class_mode=None,
                                                         shuffle=False)
validation_generator = bottleneck_datagen.flow_from_directory(validation_path,
                                                              target_size=image_size,
                                                              batch_size=batch_size,
                                                              class_mode=None,
                                                              shuffle=False)
bottleneck_features_train = inc_model.predict_generator(train_generator,steps=int(len(train_generator.filenames)/batch_size)) # not sure about steps yet; this parameter could perhaps be omitted for generality
np.save(open(path_to_save_np+'/bn_features_train.npy', 'wb'), bottleneck_features_train)
bottleneck_features_validation = inc_model.predict_generator(validation_generator,int(len(validation_generator.filenames)/batch_size))
np.save(open(path_to_save_np+'/bn_features_validation.npy', 'wb'), bottleneck_features_validation)
train_data = np.load(open(os.path.join(path_to_save_np,'bn_features_train.npy'), 'rb'))
train_labels = np.array([0] * int(train_data.shape[0]/2) + [1] * int(train_data.shape[0]/2))
validation_data = np.load(open(os.path.join(path_to_save_np,'bn_features_validation.npy'), 'rb'))
validation_labels = np.array([0] * int(validation_data.shape[0]/2) + [1] * int(validation_data.shape[0]/2)) # works because the generator (flow_from_directory) does not shuffle (shuffle=False)
fc_model = Sequential()
fc_model.add(Flatten(input_shape=train_data.shape[1:]))
fc_model.add(Dense(64, activation='relu', name='dense_one'))
fc_model.add(Dropout(0.5, name='dropout_one'))
fc_model.add(Dense(64, activation='relu', name='dense_two'))
fc_model.add(Dropout(0.5, name='dropout_two'))
if n_classes==2:
    fc_model.add(Dense(1, activation='sigmoid', name='output'))
else:
    fc_model.add(Dense(n_classes, activation='softmax', name='output'))
fc_model.compile(optimizer='rmsprop',
                 loss='binary_crossentropy',
                 metrics=['accuracy'])
fc_model.fit(train_data, train_labels,
             nb_epoch=fc_nb_epoch, batch_size=batch_size,
             validation_data=(validation_data, validation_labels))
fc_model.save_weights(os.path.join(path_to_save_np,'fc_inception_cats_dogs_250.hdf5')) # save the weights
fc_model.evaluate(validation_data, validation_labels)
################################################################################################
# PART 2 UNITE 2 MODELS
weights_filename=os.path.join(path_to_save_np,'fc_inception_cats_dogs_250.hdf5')
x = Flatten()(inc_model.output)
x = Dense(64, activation='relu', name='dense_one')(x)
x = Dropout(0.5, name='dropout_one')(x)
x = Dense(64, activation='relu', name='dense_two')(x)
x = Dropout(0.5, name='dropout_two')(x)
if n_classes==2:
    top_model=Dense(1, activation='sigmoid', name='output')(x)
else:
    top_model = Dense(n_classes, activation='softmax', name='output')(x)
model = Model(input=inc_model.input, output=top_model)
model.load_weights(weights_filename, by_name=True) # load weights into specific layers by name (by_name=True)
for layer in inc_model.layers[:inception_nontrainable_layers_count]:
    layer.trainable = False
model.compile(loss='binary_crossentropy',
              optimizer=SGD(lr=1e-4, momentum=0.9),
              #optimizer='rmsprop',
              metrics=['accuracy']) # fine-tuning (RMSProp was used the first time round; this time we use SGD so that the weights are not updated too aggressively)
filepath=os.path.join(path_to_save_np,"weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5")
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True) # data augmentation happens here, in particular horizontal flipping
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_path,
    target_size=image_size,
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_path,
    target_size=image_size,
    batch_size=batch_size,
    class_mode='binary')
pred_generator=test_datagen.flow_from_directory(validation_path,
                                                target_size=image_size,
                                                batch_size=batch_size2,
                                                class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=np.ceil(train_data.shape[0]/batch_size),
    epochs=2,
    validation_data=validation_generator,
    validation_steps=np.ceil(validation_data.shape[0]/batch_size),
    callbacks=callbacks_list)
model.evaluate_generator(pred_generator, val_samples=batch_size2) # val_samples must equal the batch size used in the generator!!!
imgs,labels=pred_generator.next() # fetches a batch of images from the generator together with their labels
array_imgs=np.transpose(np.asarray([img_to_array(img) for img in imgs]),(0,2,1,3))
predictions=model.predict(imgs)
rounded_pred=np.asarray([np.round(i) for i in predictions])
print("Accuracy score: "+str(accuracy_score(labels,rounded_pred)))
print("Confusion matrix: ")
print(confusion_matrix(labels,rounded_pred))
print("F1-score(average='macro'): "+str(f1_score(labels, rounded_pred, average='macro')))
print("F1-score(average='micro'): "+str(f1_score(labels, rounded_pred, average='micro')))
print("F1-score(average='weighted'): "+str(f1_score(labels, rounded_pred, average='weighted')))
print("F1-score(average=None): ")
print(f1_score(labels, rounded_pred, average=None))
print("Precision-score(average='macro'): "+str(precision_score(labels, rounded_pred, average='macro')))
print("Precision-score(average='micro'): "+str(precision_score(labels, rounded_pred, average='micro')))
print("Precision-score(average='weighted'): "+str(precision_score(labels, rounded_pred, average='weighted')))
print("Precision-score(average=None): ")
print(precision_score(labels, rounded_pred, average=None))
print("Recall-score(average='macro'): "+str(recall_score(labels, rounded_pred, average='macro')))
print("Recall-score(average='micro'): "+str(recall_score(labels, rounded_pred, average='micro')))
print("Recall-score(average='weighted'): "+str(recall_score(labels, rounded_pred, average='weighted')))
print("Recall-score(average=None): ")
print(recall_score(labels, rounded_pred, average=None))
if n_classes==2:
    print("ROC_AUC score: " + str(roc_auc_score(labels, rounded_pred)))
    print("F1-score(average='binary'): " + str(f1_score(labels, rounded_pred, average='binary')))
    print("Precision-score(average='binary'): " + str(precision_score(labels, rounded_pred, average='binary')))
    print("Recall-score(average='binary'): " + str(recall_score(labels, rounded_pred, average='binary')))
normal
{ "blob_id": "17a442a85b910ff47c2f3f01242b7f64a6237146", "index": 9380, "step-1": "from keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential, Model\r\nfrom keras.applications import InceptionV3\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.optimizers import SGD\r\nfrom keras.layers import Flatten,Dense,Dropout\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras import backend as K\r\nK.set_image_dim_ordering('th')\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nimport os\r\n#######################################################################################################################\r\n#######################################################################################################################\r\n# ПАРАМЕТРЫ ДЛЯ ИЗМЕНЕНИЯ!!!!!!!!!!!!!!!\r\npath=r\"C:\\Users\\Dmitry\\Desktop\\CNN_Research_2018\\cats_vs_dogs\"\r\nimage_size=(150, 150) # здесь задавать размер входных изображений\r\nbatch_size=32 # выбирать из этих значений [8,16,32]\r\nbatch_size2=100\r\ninception_nontrainable_layers_count=205 # количество слоёв InceptionV3, чьи веса мы не меняем при обучении, в процессе переноса обучения(transfer learning)\r\nnb_epoch=1# количество эпох обучения нейронной сети\r\nfc_nb_epoch=10# количество эпох обучения классификационной части сети\r\nn_classes=2# количество классов для обучения\r\ntrain_path=os.path.join(path,\"test\")# FIXME train!!!\r\nvalidation_path=os.path.join(path,'validation')\r\ntest_path=os.path.join(path,'test')\r\npath_to_save_np=path# путь для сохранений нумпай-массивов ПОМЕНЯЙТЕ НА СВОЙ ПУТЬ, КУДА ХОТИТЕ СОХРАНЯТЬ 10 ГБ!!!\r\n########################################################################################################################\r\n########################################################################################################################\r\ninc_model=InceptionV3(include_top=False,\r\n weights='imagenet',\r\n input_shape=((3, image_size[0], image_size[1])))\r\nbottleneck_datagen = ImageDataGenerator(rescale=1. 
/ 255) # собственно, генератор\r\ntrain_generator = bottleneck_datagen.flow_from_directory(train_path,\r\n target_size=image_size,\r\n batch_size=batch_size,\r\n class_mode=None,\r\n shuffle=False)\r\nvalidation_generator = bottleneck_datagen.flow_from_directory(validation_path,\r\n target_size=image_size,\r\n batch_size=batch_size,\r\n class_mode=None,\r\n shuffle=False)\r\nbottleneck_features_train = inc_model.predict_generator(train_generator,steps=int(len(train_generator.filenames)/batch_size))# пока не разобрался со steps, мб для универсальности опустить этот параметр\r\nnp.save(open(path_to_save_np+'/bn_features_train.npy', 'wb'), bottleneck_features_train)\r\nbottleneck_features_validation = inc_model.predict_generator(validation_generator,int(len(validation_generator.filenames)/batch_size))\r\nnp.save(open(path_to_save_np+'/bn_features_validation.npy', 'wb'), bottleneck_features_validation)\r\ntrain_data = np.load(open(os.path.join(path_to_save_np,'bn_features_train.npy'), 'rb'))\r\ntrain_labels = np.array([0] * int(train_data.shape[0]/2) + [1] * int(train_data.shape[0]/2))\r\nvalidation_data = np.load(open(os.path.join(path_to_save_np,'bn_features_validation.npy'), 'rb'))\r\nvalidation_labels = np.array([0] * int(validation_data.shape[0]/2) + [1] * int(validation_data.shape[0]/2)) # за счёт отсутсвия перемешивания(shuffle=False) в генераторе(flow_from_directory)\r\nfc_model = Sequential()\r\nfc_model.add(Flatten(input_shape=train_data.shape[1:]))\r\nfc_model.add(Dense(64, activation='relu', name='dense_one'))\r\nfc_model.add(Dropout(0.5, name='dropout_one'))\r\nfc_model.add(Dense(64, activation='relu', name='dense_two'))\r\nfc_model.add(Dropout(0.5, name='dropout_two'))\r\nif n_classes==2:\r\n fc_model.add(Dense(1, activation='sigmoid', name='output'))\r\nelse:\r\n fc_model.add(Dense(n_classes, activation='softmax', name='output'))\r\nfc_model.compile(optimizer='rmsprop',\r\n loss='binary_crossentropy',\r\n metrics=['accuracy'])\r\nfc_model.fit(train_data, train_labels,\r\n nb_epoch=fc_nb_epoch, batch_size=batch_size,\r\n validation_data=(validation_data, validation_labels))\r\nfc_model.save_weights(os.path.join(path_to_save_np,'fc_inception_cats_dogs_250.hdf5')) # сохраняем веса\r\nfc_model.evaluate(validation_data, validation_labels)\r\n################################################################################################\r\n# PART 2 UNITE 2 MODELS\r\nweights_filename=os.path.join(path_to_save_np,'fc_inception_cats_dogs_250.hdf5')\r\nx = Flatten()(inc_model.output)\r\nx = Dense(64, activation='relu', name='dense_one')(x)\r\nx = Dropout(0.5, name='dropout_one')(x)\r\nx = Dense(64, activation='relu', name='dense_two')(x)\r\nx = Dropout(0.5, name='dropout_two')(x)\r\nif n_classes==2:\r\n top_model=Dense(1, activation='sigmoid', name='output')(x)\r\nelse:\r\n top_model = Dense(n_classes, activation='softmax', name='output')(x)\r\nmodel = Model(input=inc_model.input, output=top_model)\r\nmodel.load_weights(weights_filename, by_name=True) # загрузить веса в определённые слои по имени (by_name=True)\r\nfor layer in inc_model.layers[:inception_nontrainable_layers_count]:\r\n layer.trainable = False\r\nmodel.compile(loss='binary_crossentropy',\r\n optimizer=SGD(lr=1e-4, momentum=0.9),\r\n #optimizer='rmsprop',\r\n metrics=['accuracy']) # тонкая настройка (в первый раз использовали RMSProp, во второй раз используем стохастический градиентный бустинг для того, чтобы веса не слищком сильно 
обновлялись)\r\nfilepath=os.path.join(path_to_save_np,\"weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5\")\r\ncheckpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\r\ncallbacks_list = [checkpoint]\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True) # здесь происходит аугментация данных, в частности, горизонтальное отражение\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\ntrain_generator = train_datagen.flow_from_directory(\r\n train_path,\r\n target_size=image_size,\r\n batch_size=batch_size,\r\n class_mode='binary')\r\nvalidation_generator = test_datagen.flow_from_directory(\r\n validation_path,\r\n target_size=image_size,\r\n batch_size=batch_size,\r\n class_mode='binary')\r\npred_generator=test_datagen.flow_from_directory(validation_path,\r\n target_size=image_size,\r\n batch_size=batch_size2,\r\n class_mode='binary')\r\n\r\nmodel.fit_generator(\r\n train_generator,\r\n steps_per_epoch=np.ceil(train_data.shape[0]/batch_size),\r\n epochs=2,\r\n validation_data=validation_generator,\r\n validation_steps=np.ceil(validation_data.shape[0]/batch_size),\r\n callbacks=callbacks_list)\r\nmodel.evaluate_generator(pred_generator, val_samples=batch_size2)# val_samples должен быть равен величине батча в генераторе!!!\r\nimgs,labels=pred_generator.next() # загружает изображения в генератор и присваивает ей label\r\narray_imgs=np.transpose(np.asarray([img_to_array(img) for img in imgs]),(0,2,1,3))\r\npredictions=model.predict(imgs)\r\nrounded_pred=np.asarray([np.round(i) for i in predictions])\r\nprint(\"Accuracy score: \"+str(accuracy_score(labels,rounded_pred)))\r\nprint(\"Confusion matrix: \")\r\nprint(confusion_matrix(labels,rounded_pred))\r\nprint(\"F1-score(average='macro'): \"+str(f1_score(labels, rounded_pred, average='macro')))\r\nprint(\"F1-score(average='micro'): \"+str(f1_score(labels, rounded_pred, average='micro')))\r\nprint(\"F1-score(average='weighted'): \"+str(f1_score(labels, rounded_pred, average='weighted')))\r\nprint(\"F1-score(average=None): \")\r\nprint(f1_score(labels, rounded_pred, average=None))\r\nprint(\"Precision-score(average='macro'):\"+str(precision_score(labels, rounded_pred, average='macro')))\r\nprint(\"Precision-score(average='micro'): \"+str(precision_score(labels, rounded_pred, average='micro')))\r\nprint(\"Precision-score(average='weighted'): \"+str(precision_score(labels, rounded_pred, average='weighted')))\r\nprint(\"Precision-score(average=None): \")\r\nprint(precision_score(labels, rounded_pred, average=None))\r\nprint(\"Recall-score(average='macro'):\"+str(recall_score(labels, rounded_pred, average='macro')))\r\nprint(\"Recall-score(average='micro'): \"+str(recall_score(labels, rounded_pred, average='micro')))\r\nprint(\"Recall-score(average='weighted'): \"+str(recall_score(labels, rounded_pred, average='weighted')))\r\nprint(\"Recall-score(average=None): \")\r\nprint(recall_score(labels, rounded_pred, average=None))\r\nif n_classes==2:\r\n print(\"ROC_AUC score:\" + str(roc_auc_score(labels, rounded_pred)))\r\n print(\"F1-score(average='binary'): \" + str(f1_score(labels, rounded_pred, average='binary')))\r\n print(\"Precision-score(average='binary'):\" + str(precision_score(labels, rounded_pred, average='binary')))\r\n print(\"Recall-score(average='binary'):\" + str(recall_score(labels, rounded_pred, average='binary')))", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import torch import numpy as np import h5py from torch.utils.data import Dataset, DataLoader from config import PARAS """ Be careful: We use log mel-spectrogram for training, while the mask generated is for power mel-spectrogram """ def create_gt_mask(vocal_spec, bg_spec): """ Take in log spectrogram and return a mask map for TF bins 1 if the vocal sound is dominated in the TF-bin, while 0 for not """ vocal_spec = vocal_spec.numpy() bg_spec = bg_spec.numpy() return np.array(vocal_spec > bg_spec, dtype=np.float32) class TorchData(Dataset): def __init__(self, dataset_path): """ Take the h5py dataset """ super(TorchData, self).__init__() self.dataset = h5py.File(dataset_path, 'r') self.bg = self.dataset['bg'] self.vocal = self.dataset['vocal'] self.mix = self.dataset['mix'] self.len = self.bg.shape[0] def __len__(self): return self.len def __getitem__(self, index): bg = self.bg[index].astype(np.float32) vocal = self.vocal[index].astype(np.float32) mix = self.mix[index].astype(np.float32) mix = torch.from_numpy(mix) bg = torch.from_numpy(bg) vocal = torch.from_numpy(vocal) target = torch.from_numpy(create_gt_mask(vocal, bg)) sample = { 'vocal': vocal, # this is used for test 'bg': bg, # this is used for test 'mix': mix, 'target': target, } return sample # define the data loaders def torch_dataset_loader(dataset, batch_size, shuffle, kwargs): """ take the h5py dataset """ loader = DataLoader(TorchData(dataset), batch_size=batch_size, shuffle=shuffle, **kwargs) return loader train_loader = torch_dataset_loader(PARAS.TRAIN_DATA_PATH, PARAS.BATCH_SIZE, True, PARAS.kwargs) validation_loader = torch_dataset_loader(PARAS.VAL_DATA_PATH, PARAS.BATCH_SIZE, False, PARAS.kwargs) test_loader = torch_dataset_loader(PARAS.TEST_DATA_PATH, PARAS.BATCH_SIZE, False, PARAS.kwargs) if __name__ == '__main__': for index, data_item in enumerate(test_loader): print(data_item['vocal'].shape) print(data_item['bg'].shape) print(data_item['mix'].shape) print(data_item['target'].shape) break
normal
{ "blob_id": "1133d3cf900e31278dc491565c99969a116e6c83", "index": 1998, "step-1": "<mask token>\n\n\nclass TorchData(Dataset):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef create_gt_mask(vocal_spec, bg_spec):\n \"\"\"\n Take in log spectrogram and return a mask map for TF bins\n 1 if the vocal sound is dominated in the TF-bin, while 0 for not\n \"\"\"\n vocal_spec = vocal_spec.numpy()\n bg_spec = bg_spec.numpy()\n return np.array(vocal_spec > bg_spec, dtype=np.float32)\n\n\nclass TorchData(Dataset):\n\n def __init__(self, dataset_path):\n \"\"\"\n Take the h5py dataset\n \"\"\"\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, index):\n bg = self.bg[index].astype(np.float32)\n vocal = self.vocal[index].astype(np.float32)\n mix = self.mix[index].astype(np.float32)\n mix = torch.from_numpy(mix)\n bg = torch.from_numpy(bg)\n vocal = torch.from_numpy(vocal)\n target = torch.from_numpy(create_gt_mask(vocal, bg))\n sample = {'vocal': vocal, 'bg': bg, 'mix': mix, 'target': target}\n return sample\n\n\ndef torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n \"\"\"\n take the h5py dataset\n \"\"\"\n loader = DataLoader(TorchData(dataset), batch_size=batch_size, shuffle=\n shuffle, **kwargs)\n return loader\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef create_gt_mask(vocal_spec, bg_spec):\n \"\"\"\n Take in log spectrogram and return a mask map for TF bins\n 1 if the vocal sound is dominated in the TF-bin, while 0 for not\n \"\"\"\n vocal_spec = vocal_spec.numpy()\n bg_spec = bg_spec.numpy()\n return np.array(vocal_spec > bg_spec, dtype=np.float32)\n\n\nclass TorchData(Dataset):\n\n def __init__(self, dataset_path):\n \"\"\"\n Take the h5py dataset\n \"\"\"\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, index):\n bg = self.bg[index].astype(np.float32)\n vocal = self.vocal[index].astype(np.float32)\n mix = self.mix[index].astype(np.float32)\n mix = torch.from_numpy(mix)\n bg = torch.from_numpy(bg)\n vocal = torch.from_numpy(vocal)\n target = torch.from_numpy(create_gt_mask(vocal, bg))\n sample = {'vocal': vocal, 'bg': bg, 'mix': mix, 'target': target}\n return sample\n\n\ndef torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n \"\"\"\n take the h5py dataset\n \"\"\"\n loader = DataLoader(TorchData(dataset), batch_size=batch_size, shuffle=\n shuffle, **kwargs)\n return loader\n\n\ntrain_loader = torch_dataset_loader(PARAS.TRAIN_DATA_PATH, PARAS.BATCH_SIZE,\n True, PARAS.kwargs)\nvalidation_loader = torch_dataset_loader(PARAS.VAL_DATA_PATH, PARAS.\n BATCH_SIZE, False, PARAS.kwargs)\ntest_loader = torch_dataset_loader(PARAS.TEST_DATA_PATH, PARAS.BATCH_SIZE, \n False, PARAS.kwargs)\nif __name__ == '__main__':\n for index, data_item in enumerate(test_loader):\n print(data_item['vocal'].shape)\n print(data_item['bg'].shape)\n print(data_item['mix'].shape)\n print(data_item['target'].shape)\n break\n", "step-4": "import torch\nimport numpy as np\nimport h5py\nfrom torch.utils.data import Dataset, DataLoader\nfrom config import PARAS\n<mask token>\n\n\ndef 
create_gt_mask(vocal_spec, bg_spec):\n \"\"\"\n Take in log spectrogram and return a mask map for TF bins\n 1 if the vocal sound is dominated in the TF-bin, while 0 for not\n \"\"\"\n vocal_spec = vocal_spec.numpy()\n bg_spec = bg_spec.numpy()\n return np.array(vocal_spec > bg_spec, dtype=np.float32)\n\n\nclass TorchData(Dataset):\n\n def __init__(self, dataset_path):\n \"\"\"\n Take the h5py dataset\n \"\"\"\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, index):\n bg = self.bg[index].astype(np.float32)\n vocal = self.vocal[index].astype(np.float32)\n mix = self.mix[index].astype(np.float32)\n mix = torch.from_numpy(mix)\n bg = torch.from_numpy(bg)\n vocal = torch.from_numpy(vocal)\n target = torch.from_numpy(create_gt_mask(vocal, bg))\n sample = {'vocal': vocal, 'bg': bg, 'mix': mix, 'target': target}\n return sample\n\n\ndef torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n \"\"\"\n take the h5py dataset\n \"\"\"\n loader = DataLoader(TorchData(dataset), batch_size=batch_size, shuffle=\n shuffle, **kwargs)\n return loader\n\n\ntrain_loader = torch_dataset_loader(PARAS.TRAIN_DATA_PATH, PARAS.BATCH_SIZE,\n True, PARAS.kwargs)\nvalidation_loader = torch_dataset_loader(PARAS.VAL_DATA_PATH, PARAS.\n BATCH_SIZE, False, PARAS.kwargs)\ntest_loader = torch_dataset_loader(PARAS.TEST_DATA_PATH, PARAS.BATCH_SIZE, \n False, PARAS.kwargs)\nif __name__ == '__main__':\n for index, data_item in enumerate(test_loader):\n print(data_item['vocal'].shape)\n print(data_item['bg'].shape)\n print(data_item['mix'].shape)\n print(data_item['target'].shape)\n break\n", "step-5": "import torch\nimport numpy as np\nimport h5py\nfrom torch.utils.data import Dataset, DataLoader\nfrom config import PARAS\n\n\"\"\"\nBe careful:\nWe use log mel-spectrogram for training,\nwhile the mask generated is for power mel-spectrogram\n\"\"\"\n\n\ndef create_gt_mask(vocal_spec, bg_spec):\n \"\"\"\n Take in log spectrogram and return a mask map for TF bins\n 1 if the vocal sound is dominated in the TF-bin, while 0 for not\n \"\"\"\n vocal_spec = vocal_spec.numpy()\n bg_spec = bg_spec.numpy()\n return np.array(vocal_spec > bg_spec, dtype=np.float32)\n\n\nclass TorchData(Dataset):\n\n def __init__(self, dataset_path):\n \"\"\"\n Take the h5py dataset\n \"\"\"\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, index):\n bg = self.bg[index].astype(np.float32)\n vocal = self.vocal[index].astype(np.float32)\n mix = self.mix[index].astype(np.float32)\n\n mix = torch.from_numpy(mix)\n bg = torch.from_numpy(bg)\n vocal = torch.from_numpy(vocal)\n target = torch.from_numpy(create_gt_mask(vocal, bg))\n\n sample = {\n 'vocal': vocal, # this is used for test\n 'bg': bg, # this is used for test\n 'mix': mix,\n 'target': target,\n }\n\n return sample\n\n\n# define the data loaders\ndef torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n \"\"\"\n take the h5py dataset\n \"\"\"\n loader = DataLoader(TorchData(dataset),\n batch_size=batch_size,\n shuffle=shuffle,\n **kwargs)\n return loader\n\n\ntrain_loader = torch_dataset_loader(PARAS.TRAIN_DATA_PATH, PARAS.BATCH_SIZE, 
True, PARAS.kwargs)\nvalidation_loader = torch_dataset_loader(PARAS.VAL_DATA_PATH, PARAS.BATCH_SIZE, False, PARAS.kwargs)\ntest_loader = torch_dataset_loader(PARAS.TEST_DATA_PATH, PARAS.BATCH_SIZE, False, PARAS.kwargs)\n\n\nif __name__ == '__main__':\n\n for index, data_item in enumerate(test_loader):\n print(data_item['vocal'].shape)\n print(data_item['bg'].shape)\n print(data_item['mix'].shape)\n print(data_item['target'].shape)\n break\n", "step-ids": [ 1, 6, 8, 9, 10 ] }
[ 1, 6, 8, 9, 10 ]
class Meta(type):
    def __new__(meta, name, bases, class_dict):
        print(f'* Running {meta}.__new__ for {name}')
        print("Bases:", bases)
        print(class_dict)
        return type.__new__(meta, name, bases, class_dict)

class MyClass(metaclass=Meta):
    stuff = 123

    def foo(self):
        pass

class MySubClass(MyClass):
    ofther = 456

    def bar(self):
        pass

print("")

class MyClass2:
    stuff = 123

    def __init_subclass__(cls):
        super().__init_subclass__()
        print(f'* Running {cls.__name__}.__init_subclass__')
        print(cls.__dict__)
        # cls.super() is not valid Python; walk the MRO to reach the parent class namespace
        print(cls.__mro__[1].__dict__)

    def foo(self):
        pass

class MySubClass2(MyClass2):
    ofther = 456

    def bar(self):
        pass
normal
{ "blob_id": "8f3abc5beaded94b6d7b93ac2cfcd12145d75fe8", "index": 522, "step-1": "<mask token>\n\n\nclass MySubClass(MyClass):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n", "step-2": "<mask token>\n\n\nclass MyClass(metaclass=Meta):\n <mask token>\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<mask token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n", "step-3": "class Meta(type):\n <mask token>\n\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<mask token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n", "step-4": "class Meta(type):\n\n def __new__(meta, name, bases, class_dict):\n print(f'* Running {meta}.__new__ for {name}')\n print('Bases:', bases)\n print(class_dict)\n return type.__new__(meta, name, bases, class_dict)\n\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<mask token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n", "step-5": "class Meta(type):\n def __new__(meta, name, bases, class_dict):\n print(f'* Running {meta}.__new__ for {name}')\n print(\"Bases:\", bases)\n print(class_dict)\n return type.__new__(meta, name, bases, class_dict)\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\nprint(\"\")\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n\n def foo(self):\n pass\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n", "step-ids": [ 8, 12, 14, 15, 17 ] }
[ 8, 12, 14, 15, 17 ]
import os

os.mkdir("作业")
f = open("D:/six3/s/作业/tet.txt", 'w+')
for i in range(10):
    f.write("hello world\n")
f.seek(0)
s = f.read(100)
print(s)
f.close()  # close the first file before rebinding the name
f = open("D:/six3/s/作业/tet2.txt", 'w+')
for i in s:
    f.write(i)
f.close()
normal
{ "blob_id": "5f5e314d2d18deb12a8ae757a117ef8fbb2ddad5", "index": 2391, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.mkdir('作业')\n<mask token>\nfor i in range(10):\n f.write('hello world\\n')\nf.seek(0)\n<mask token>\nprint(s)\n<mask token>\nfor i in s:\n f.write(i)\nf.close()\n", "step-3": "<mask token>\nos.mkdir('作业')\nf = open('D:/six3/s/作业/tet.txt', 'w+')\nfor i in range(10):\n f.write('hello world\\n')\nf.seek(0)\ns = f.read(100)\nprint(s)\nf = open('D:/six3/s/作业/tet2.txt', 'w+')\nfor i in s:\n f.write(i)\nf.close()\n", "step-4": "import os\nos.mkdir('作业')\nf = open('D:/six3/s/作业/tet.txt', 'w+')\nfor i in range(10):\n f.write('hello world\\n')\nf.seek(0)\ns = f.read(100)\nprint(s)\nf = open('D:/six3/s/作业/tet2.txt', 'w+')\nfor i in s:\n f.write(i)\nf.close()\n", "step-5": "import os\nos.mkdir(\"作业\")\nf=open(\"D:/six3/s/作业/tet.txt\",'w+')\nfor i in range(10):\n f.write(\"hello world\\n\")\n\nf.seek(0)\ns=f.read(100)\nprint(s)\nf=open(\"D:/six3/s/作业/tet2.txt\",'w+')\nfor i in s:\n f.write(i)\nf.close()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with open('datasetParsing2DEF.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: print(f"Column names are {', '.join(row)}") line_count += 1 else: print(row[1], row[2]) words.append(row[2]) etichette.append(row[1]) line_count += 1 print(f'Processed {line_count} lines.') <|reserved_special_token_0|> print(word_embeddings) <|reserved_special_token_0|> kmeans.fit(word_embeddings), <|reserved_special_token_0|> print(y_kmeans) <|reserved_special_token_0|> for j in range(0, len(y_kmeans)): print(etichette[i]) print(word_embeddings[j, 0]) print(word_embeddings[j, 1]) print() plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' + 'O' + '$', s=30, label=j) i = i + 1 <|reserved_special_token_0|> plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5) plt.show() <|reserved_special_token_1|> <|reserved_special_token_0|> c2v_model = chars2vec.load_model('eng_50') words = [] etichette = [] with open('datasetParsing2DEF.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: print(f"Column names are {', '.join(row)}") line_count += 1 else: print(row[1], row[2]) words.append(row[2]) etichette.append(row[1]) line_count += 1 print(f'Processed {line_count} lines.') word_embeddings = c2v_model.vectorize_words(words) print(word_embeddings) kmeans = KMeans(init='random', n_clusters=4, n_init=10, max_iter=200, random_state=30) kmeans.fit(word_embeddings), y_kmeans = kmeans.predict(word_embeddings) print(y_kmeans) i = 0 for j in range(0, len(y_kmeans)): print(etichette[i]) print(word_embeddings[j, 0]) print(word_embeddings[j, 1]) print() plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' + 'O' + '$', s=30, label=j) i = i + 1 centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5) plt.show() <|reserved_special_token_1|> import chars2vec import sklearn.decomposition import matplotlib.pyplot as plt import csv from sklearn.cluster import KMeans c2v_model = chars2vec.load_model('eng_50') words = [] etichette = [] with open('datasetParsing2DEF.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: print(f"Column names are {', '.join(row)}") line_count += 1 else: print(row[1], row[2]) words.append(row[2]) etichette.append(row[1]) line_count += 1 print(f'Processed {line_count} lines.') word_embeddings = c2v_model.vectorize_words(words) print(word_embeddings) kmeans = KMeans(init='random', n_clusters=4, n_init=10, max_iter=200, random_state=30) kmeans.fit(word_embeddings), y_kmeans = kmeans.predict(word_embeddings) print(y_kmeans) i = 0 for j in range(0, len(y_kmeans)): print(etichette[i]) print(word_embeddings[j, 0]) print(word_embeddings[j, 1]) print() plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' + 'O' + '$', s=30, label=j) i = i + 1 centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5) plt.show() <|reserved_special_token_1|> import chars2vec import sklearn.decomposition import matplotlib.pyplot as plt import csv # Load Inutition Engineering pretrained model # Models names: 'eng_50', 'eng_100', 'eng_150' 'eng_200', 'eng_300' from sklearn.cluster import KMeans c2v_model = chars2vec.load_model('eng_50') words=[] etichette=[] with 
open('datasetParsing2DEF.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: print(f'Column names are {", ".join(row)}') line_count += 1 else: print(row[1],row[2]) words.append(row[2]) etichette.append(row[1]) line_count += 1 print(f'Processed {line_count} lines.') # Create word embeddings word_embeddings = c2v_model.vectorize_words(words) print(word_embeddings) kmeans = KMeans( init="random", n_clusters=4, n_init=10, max_iter=200, random_state=30) kmeans.fit(word_embeddings), y_kmeans = kmeans.predict(word_embeddings) print(y_kmeans) i=0; for j in range(0,len(y_kmeans)): print(etichette[i]) print(word_embeddings[j,0]) print(word_embeddings[j,1]) print() #plt.scatter(word_embeddings[:, 0], word_embeddings[:, 1],marker=('$' + etichette[i] + '$'),c=y_kmeans, s=1800) plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker=('$' + 'O'+ '$'), s=30, label=j) i=i+1 centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5) plt.show()
flexible
{ "blob_id": "084579152a2cc7feb2c31e0209ce1e32f4905d81", "index": 5316, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f\"Column names are {', '.join(row)}\")\n line_count += 1\n else:\n print(row[1], row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n print(f'Processed {line_count} lines.')\n<mask token>\nprint(word_embeddings)\n<mask token>\nkmeans.fit(word_embeddings),\n<mask token>\nprint(y_kmeans)\n<mask token>\nfor j in range(0, len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j, 0])\n print(word_embeddings[j, 1])\n print()\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' +\n 'O' + '$', s=30, label=j)\n i = i + 1\n<mask token>\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nplt.show()\n", "step-3": "<mask token>\nc2v_model = chars2vec.load_model('eng_50')\nwords = []\netichette = []\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f\"Column names are {', '.join(row)}\")\n line_count += 1\n else:\n print(row[1], row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n print(f'Processed {line_count} lines.')\nword_embeddings = c2v_model.vectorize_words(words)\nprint(word_embeddings)\nkmeans = KMeans(init='random', n_clusters=4, n_init=10, max_iter=200,\n random_state=30)\nkmeans.fit(word_embeddings),\ny_kmeans = kmeans.predict(word_embeddings)\nprint(y_kmeans)\ni = 0\nfor j in range(0, len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j, 0])\n print(word_embeddings[j, 1])\n print()\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' +\n 'O' + '$', s=30, label=j)\n i = i + 1\ncenters = kmeans.cluster_centers_\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nplt.show()\n", "step-4": "import chars2vec\nimport sklearn.decomposition\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn.cluster import KMeans\nc2v_model = chars2vec.load_model('eng_50')\nwords = []\netichette = []\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f\"Column names are {', '.join(row)}\")\n line_count += 1\n else:\n print(row[1], row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n print(f'Processed {line_count} lines.')\nword_embeddings = c2v_model.vectorize_words(words)\nprint(word_embeddings)\nkmeans = KMeans(init='random', n_clusters=4, n_init=10, max_iter=200,\n random_state=30)\nkmeans.fit(word_embeddings),\ny_kmeans = kmeans.predict(word_embeddings)\nprint(y_kmeans)\ni = 0\nfor j in range(0, len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j, 0])\n print(word_embeddings[j, 1])\n print()\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1], marker='$' +\n 'O' + '$', s=30, label=j)\n i = i + 1\ncenters = kmeans.cluster_centers_\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nplt.show()\n", "step-5": "import chars2vec\nimport sklearn.decomposition\nimport matplotlib.pyplot as plt\nimport csv\n\n# Load Inutition Engineering pretrained model\n# Models names: 'eng_50', 'eng_100', 'eng_150' 'eng_200', 'eng_300'\nfrom sklearn.cluster import KMeans\n\nc2v_model = 
chars2vec.load_model('eng_50')\n\nwords=[]\netichette=[]\n\nwith open('datasetParsing2DEF.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n print(row[1],row[2])\n words.append(row[2])\n etichette.append(row[1])\n line_count += 1\n\n\n print(f'Processed {line_count} lines.')\n\n\n\n# Create word embeddings\nword_embeddings = c2v_model.vectorize_words(words)\nprint(word_embeddings)\n\n\nkmeans = KMeans(\n init=\"random\",\n n_clusters=4,\n n_init=10,\n max_iter=200,\n random_state=30)\n\nkmeans.fit(word_embeddings),\n\ny_kmeans = kmeans.predict(word_embeddings)\nprint(y_kmeans)\ni=0;\nfor j in range(0,len(y_kmeans)):\n print(etichette[i])\n print(word_embeddings[j,0])\n print(word_embeddings[j,1])\n print()\n #plt.scatter(word_embeddings[:, 0], word_embeddings[:, 1],marker=('$' + etichette[i] + '$'),c=y_kmeans, s=1800)\n plt.scatter(word_embeddings[j, 0], word_embeddings[j, 1],\n marker=('$' + 'O'+ '$'),\n s=30, label=j)\n i=i+1\n\ncenters = kmeans.cluster_centers_\n\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\n\nplt.show()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
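Not part of the record above: the step-5 script imports sklearn.decomposition but never uses it, and it scatter-plots only the first two of the 50 chars2vec embedding dimensions. A minimal sketch of the PCA projection it was presumably reaching for (the sample words below are hypothetical):

import chars2vec
import sklearn.decomposition
import matplotlib.pyplot as plt

c2v_model = chars2vec.load_model('eng_50')
words = ['password', 'pasword', 'user', 'usr']  # hypothetical sample words
embeddings = c2v_model.vectorize_words(words)   # shape: (len(words), 50)

# Project the 50-dimensional vectors onto their two principal components,
# so the scatter plot reflects actual structure rather than two arbitrary axes.
projection = sklearn.decomposition.PCA(n_components=2).fit_transform(embeddings)

plt.scatter(projection[:, 0], projection[:, 1], s=30)
for word, (x, y) in zip(words, projection):
    plt.annotate(word, (x, y))  # label each point with its source word
plt.show()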
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for card in cards:
    try:
        number = int(card)
        if number % 2 == 0:
            print(card, 'is an even card.')
    except ValueError:
        print(card, 'cannot be divided')
<|reserved_special_token_1|>
cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
for card in cards:
    try:
        number = int(card)
        if number % 2 == 0:
            print(card, 'is an even card.')
    except ValueError:
        print(card, 'cannot be divided')
<|reserved_special_token_1|>

# print all cards with even numbers.

cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]

for card in cards:
    try:
        number = int(card)
        if number % 2 == 0: # modulo operator
            print(card, "is an even card.")
    except ValueError:
        print(card, "cannot be divided")
flexible
{ "blob_id": "b5180a2dbe1f12e1bbc92874c67ea99c9a84a9ed", "index": 19, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0:\n print(card, 'is an even card.')\n except ValueError:\n print(card, 'can not be divided')\n", "step-3": "cards = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0:\n print(card, 'is an even card.')\n except ValueError:\n print(card, 'can not be divided')\n", "step-4": "\n# print all cards with even numbers.\n\ncards = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\n\nfor card in cards:\n try:\n number = int(card)\n if number % 2 == 0: # modulo operator\n print(card, \"is an even card.\")\n except ValueError:\n print (card, \"can not be divided\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
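The sample above filters even cards by attempting int() and catching ValueError on face cards. An equivalent sketch without exception handling, using str.isdigit() (illustrative only, not part of the record):

cards = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]

# str.isdigit() is True only for purely numeric strings, so face cards are
# skipped before int() is ever called.
even_cards = [card for card in cards if card.isdigit() and int(card) % 2 == 0]
print(even_cards)  # ['2', '4', '6', '8', '10']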
INITIAL_B = 0.15062677711161448 B_FACTOR = 5.0 INITIAL_GE = 0.22581915788215678 GE_BOUNDS = [1.0 / 10.0, 1.0 / 4.0] FIXED_P = 0.9401234488501574 INITIAL_GU = 0.2145066414796447 GU_BOUNDS = [1.0 / 15.0, 1.0 / 2.0] INITIAL_GI = 0.19235137989123863 GI_BOUNDS = [1.0 / 15.0, 1.0 / 5.0] INITIAL_GH = 0.044937075878220795 GH_BOUNDS = [1.0 / 20.0, 1.0 / 5.0] INITIAL_MU = 0.002840331041978459 MU_BOUNDS = [0.0, 0.1] INITIAL_PARAMETERS = [ INITIAL_B, INITIAL_GE, FIXED_P, INITIAL_GU, INITIAL_GI, INITIAL_GH, None, # rH INITIAL_MU, ] E_FACTOR = 5.0 U_FACTOR = 5.0 I_FACTOR = 5.0
normal
{ "blob_id": "47cf3045f2fa0f69759e09b1599e4afe953c06d8", "index": 5138, "step-1": "<mask token>\n", "step-2": "INITIAL_B = 0.15062677711161448\nB_FACTOR = 5.0\nINITIAL_GE = 0.22581915788215678\nGE_BOUNDS = [1.0 / 10.0, 1.0 / 4.0]\nFIXED_P = 0.9401234488501574\nINITIAL_GU = 0.2145066414796447\nGU_BOUNDS = [1.0 / 15.0, 1.0 / 2.0]\nINITIAL_GI = 0.19235137989123863\nGI_BOUNDS = [1.0 / 15.0, 1.0 / 5.0]\nINITIAL_GH = 0.044937075878220795\nGH_BOUNDS = [1.0 / 20.0, 1.0 / 5.0]\nINITIAL_MU = 0.002840331041978459\nMU_BOUNDS = [0.0, 0.1]\nINITIAL_PARAMETERS = [INITIAL_B, INITIAL_GE, FIXED_P, INITIAL_GU,\n INITIAL_GI, INITIAL_GH, None, INITIAL_MU]\nE_FACTOR = 5.0\nU_FACTOR = 5.0\nI_FACTOR = 5.0\n", "step-3": "INITIAL_B = 0.15062677711161448\nB_FACTOR = 5.0\n\nINITIAL_GE = 0.22581915788215678\nGE_BOUNDS = [1.0 / 10.0, 1.0 / 4.0]\n\nFIXED_P = 0.9401234488501574\n\nINITIAL_GU = 0.2145066414796447\nGU_BOUNDS = [1.0 / 15.0, 1.0 / 2.0]\n\nINITIAL_GI = 0.19235137989123863\nGI_BOUNDS = [1.0 / 15.0, 1.0 / 5.0]\n\nINITIAL_GH = 0.044937075878220795\nGH_BOUNDS = [1.0 / 20.0, 1.0 / 5.0]\n\nINITIAL_MU = 0.002840331041978459\nMU_BOUNDS = [0.0, 0.1]\n\nINITIAL_PARAMETERS = [\n INITIAL_B,\n INITIAL_GE,\n FIXED_P,\n INITIAL_GU,\n INITIAL_GI,\n INITIAL_GH,\n None, # rH\n INITIAL_MU,\n]\n\nE_FACTOR = 5.0\nU_FACTOR = 5.0\nI_FACTOR = 5.0\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
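The record pairs each initial value with [low, high] bounds, keeps FIXED_P constant, and leaves a None placeholder for the rH slot of INITIAL_PARAMETERS. A hedged sketch of how such bounds are commonly consumed; the pairing with scipy.optimize and the objective below are assumptions, not part of the record:

from scipy.optimize import minimize

# Initial guesses and (low, high) bounds, copied from the constants above;
# FIXED_P and the None rH slot are excluded because they are not optimized here.
x0 = [0.22581915788215678, 0.2145066414796447, 0.19235137989123863,
      0.044937075878220795, 0.002840331041978459]
bounds = [(1 / 10, 1 / 4), (1 / 15, 1 / 2), (1 / 15, 1 / 5),
          (1 / 20, 1 / 5), (0.0, 0.1)]

def objective(x):
    # Hypothetical placeholder loss; the record itself defines no objective.
    return sum(v * v for v in x)

result = minimize(objective, x0, bounds=bounds)
print(result.x)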
<|reserved_special_token_0|> class MonitorList(tp.Generic[T], collections.UserList, Monitor): <|reserved_special_token_0|> def __init__(self, *args): collections.UserList.__init__(self, *args) Monitor.__init__(self) <|reserved_special_token_0|> <|reserved_special_token_0|> def __getitem__(self, item: tp.Union[slice, int]) ->T: return self.data[item] def __setitem__(self, key: int, value: T) ->None: self.data[key] = value def __delitem__(self, key: tp.Union[slice, int]) ->None: del self.data[key] class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor): """ A dict that is also a monitor. Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args, **kwargs): collections.UserDict.__init__(self, *args, **kwargs) Monitor.__init__(self) def __getitem__(self, item: K) ->V: return self.data[item] def __setitem__(self, key: K, value: V) ->None: self.data[key] = value def __delitem__(self, key: K) ->None: del self.data[key] def __copy__(self) ->'MonitorDict': return MonitorDict(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorDict': return MonitorDict(copy.deepcopy(self.data, memo=memo)) class MonitorSet(set, Monitor): """ A set that allows atomic insert-if-not-already-there operation """ def __init__(self, *args): super().__init__(*args) Monitor.__init__(self) def insert_and_check(self, item) ->bool: """ Perform an atomic insert if not already in set :param item: item to insert :return: whether the item was successfully inserted """ with Monitor.acquire(self): if item in self: return False self.add(item) return True <|reserved_special_token_1|> <|reserved_special_token_0|> class MonitorList(tp.Generic[T], collections.UserList, Monitor): """ A list that is also a monitor. Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args): collections.UserList.__init__(self, *args) Monitor.__init__(self) def __copy__(self) ->'MonitorList': return MonitorList(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorList': return MonitorList(copy.deepcopy(self.data, memo=memo)) def __getitem__(self, item: tp.Union[slice, int]) ->T: return self.data[item] def __setitem__(self, key: int, value: T) ->None: self.data[key] = value def __delitem__(self, key: tp.Union[slice, int]) ->None: del self.data[key] class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor): """ A dict that is also a monitor. 
Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args, **kwargs): collections.UserDict.__init__(self, *args, **kwargs) Monitor.__init__(self) def __getitem__(self, item: K) ->V: return self.data[item] def __setitem__(self, key: K, value: V) ->None: self.data[key] = value def __delitem__(self, key: K) ->None: del self.data[key] def __copy__(self) ->'MonitorDict': return MonitorDict(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorDict': return MonitorDict(copy.deepcopy(self.data, memo=memo)) class MonitorSet(set, Monitor): """ A set that allows atomic insert-if-not-already-there operation """ def __init__(self, *args): super().__init__(*args) Monitor.__init__(self) def insert_and_check(self, item) ->bool: """ Perform an atomic insert if not already in set :param item: item to insert :return: whether the item was successfully inserted """ with Monitor.acquire(self): if item in self: return False self.add(item) return True <|reserved_special_token_1|> <|reserved_special_token_0|> class RMonitor(Monitor): <|reserved_special_token_0|> def __init__(self): self._monitor_lock = threading.RLock() class MonitorList(tp.Generic[T], collections.UserList, Monitor): """ A list that is also a monitor. Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args): collections.UserList.__init__(self, *args) Monitor.__init__(self) def __copy__(self) ->'MonitorList': return MonitorList(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorList': return MonitorList(copy.deepcopy(self.data, memo=memo)) def __getitem__(self, item: tp.Union[slice, int]) ->T: return self.data[item] def __setitem__(self, key: int, value: T) ->None: self.data[key] = value def __delitem__(self, key: tp.Union[slice, int]) ->None: del self.data[key] class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor): """ A dict that is also a monitor. Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args, **kwargs): collections.UserDict.__init__(self, *args, **kwargs) Monitor.__init__(self) def __getitem__(self, item: K) ->V: return self.data[item] def __setitem__(self, key: K, value: V) ->None: self.data[key] = value def __delitem__(self, key: K) ->None: del self.data[key] def __copy__(self) ->'MonitorDict': return MonitorDict(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorDict': return MonitorDict(copy.deepcopy(self.data, memo=memo)) class MonitorSet(set, Monitor): """ A set that allows atomic insert-if-not-already-there operation """ def __init__(self, *args): super().__init__(*args) Monitor.__init__(self) def insert_and_check(self, item) ->bool: """ Perform an atomic insert if not already in set :param item: item to insert :return: whether the item was successfully inserted """ with Monitor.acquire(self): if item in self: return False self.add(item) return True <|reserved_special_token_1|> <|reserved_special_token_0|> class RMonitor(Monitor): """ Monitor, but using an reentrant lock instead of a normal one """ def __init__(self): self._monitor_lock = threading.RLock() class MonitorList(tp.Generic[T], collections.UserList, Monitor): """ A list that is also a monitor. 
Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args): collections.UserList.__init__(self, *args) Monitor.__init__(self) def __copy__(self) ->'MonitorList': return MonitorList(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorList': return MonitorList(copy.deepcopy(self.data, memo=memo)) def __getitem__(self, item: tp.Union[slice, int]) ->T: return self.data[item] def __setitem__(self, key: int, value: T) ->None: self.data[key] = value def __delitem__(self, key: tp.Union[slice, int]) ->None: del self.data[key] class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor): """ A dict that is also a monitor. Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args, **kwargs): collections.UserDict.__init__(self, *args, **kwargs) Monitor.__init__(self) def __getitem__(self, item: K) ->V: return self.data[item] def __setitem__(self, key: K, value: V) ->None: self.data[key] = value def __delitem__(self, key: K) ->None: del self.data[key] def __copy__(self) ->'MonitorDict': return MonitorDict(copy.copy(self.data)) def __deepcopy__(self, memo) ->'MonitorDict': return MonitorDict(copy.deepcopy(self.data, memo=memo)) class MonitorSet(set, Monitor): """ A set that allows atomic insert-if-not-already-there operation """ def __init__(self, *args): super().__init__(*args) Monitor.__init__(self) def insert_and_check(self, item) ->bool: """ Perform an atomic insert if not already in set :param item: item to insert :return: whether the item was successfully inserted """ with Monitor.acquire(self): if item in self: return False self.add(item) return True <|reserved_special_token_1|> import collections import copy import threading import typing as tp from ..decorators.decorators import wraps from ..typing import K, V, T class Monitor: """ Base utility class for creating monitors (the synchronization thingies!) These are NOT re-entrant! Use it like that: >>> class MyProtectedObject(Monitor): >>> def __init__(self, *args, **kwargs): >>> Monitor.__init__(self) >>> ... do your job .. >>> @Monitor.synchronized >>> def function_that_needs_mutual_exclusion(self): >>> .. do your threadsafe jobs .. >>> def function_that_partially_needs_protection(self): >>> .. do your jobs .. >>> with Monitor.acquire(self): >>> .. do your threadsafe jobs .. >>> .. do your jobs .. >>> with self: >>> .. do your threadsafe jobs .. """ def __enter__(self) -> 'Monitor': self._monitor_lock.acquire() return self def __exit__(self, exc_type, exc_val, exc_tb) -> bool: self._monitor_lock.release() return False def __init__(self): """You need to invoke this at your constructor You can also use it to release locks of other objects.""" self._monitor_lock = threading.Lock() # type: threading.Lock @staticmethod def synchronize_on_attribute(attr_name: str): """ When a Monitor is an attribute of a class, and you have a method instance that you would like secure by acquiring that monitor, use this. The first argument taken by that method instance must be self. 
:param attr_name: name of the attribute that is the monitor """ def outer(fun): @wraps(fun) def method(self, *args, **kwargs): # noinspection PyProtectedMember with getattr(self, attr_name)._monitor_lock: return fun(self, *args, **kwargs) return method return outer @staticmethod def synchronized(fun: tp.Callable) -> tp.Callable: """ This is a decorator. Class method decorated with that will lock the global lock of given instance, making it threadsafe. Depending on usage pattern of your class and it's data semantics, your performance may vary """ @wraps(fun) def monitored(*args, **kwargs): # noinspection PyProtectedMember with args[0]._monitor_lock: return fun(*args, **kwargs) return monitored class release: """ Returns a context manager object that can release another object as long as that object is a monitor. Consider foo, which is a monitor. You have a protected function, but you feel that you can release it for a while as it would improve parallelism. You can use it as such: >>> @Monitor.synchronized >>> def protected_function(self): >>> .. do some stuff that needs mutual exclusion .. >>> with Monitor.release(self): >>> .. do some I/O that does not need mutual exclusion .. >>> .. back to protected stuff .. """ __slots__ = ('foo',) def __init__(self, foo: 'Monitor'): self.foo = foo def __enter__(self) -> None: # noinspection PyProtectedMember self.foo._monitor_lock.release() def __exit__(self, e1, e2, e3) -> bool: # noinspection PyProtectedMember self.foo._monitor_lock.acquire() return False class acquire: """ Returns a context manager object that can lock another object, as long as that object is a monitor. Consider foo, which is a monitor. If you needed to lock it from outside, you would do: >>> with Monitor.acquire(foo): >>> .. do operations on foo that need mutual exclusion .. """ __slots__ = ('foo',) def __init__(self, foo: 'Monitor'): self.foo = foo def __enter__(self) -> None: # noinspection PyProtectedMember self.foo._monitor_lock.acquire() def __exit__(self, e1, e2, e3) -> bool: # noinspection PyProtectedMember self.foo._monitor_lock.release() return False @classmethod def synchronize_on(cls, monitor: 'Monitor') -> tp.Callable[[tp.Callable], tp.Callable]: """ A decorator for locking on non-self Monitor objects Use it like: >>> class MasterClass(Monitor): >>> def get_object(self): >>> class SlaveClass: >>> @Monitor.synchronize_on(self) >>> def get_object(self2): >>> ... >>> return SlaveClass """ def outer(fun): @wraps(fun) def inner(*args, **kwargs): with cls.acquire(monitor): return fun(*args, **kwargs) return inner return outer class RMonitor(Monitor): """ Monitor, but using an reentrant lock instead of a normal one """ def __init__(self): self._monitor_lock = threading.RLock() # type: threading.RLock class MonitorList(tp.Generic[T], collections.UserList, Monitor): """ A list that is also a monitor. 
Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args): collections.UserList.__init__(self, *args) Monitor.__init__(self) def __copy__(self) -> 'MonitorList': return MonitorList(copy.copy(self.data)) def __deepcopy__(self, memo) -> 'MonitorList': return MonitorList(copy.deepcopy(self.data, memo=memo)) def __getitem__(self, item: tp.Union[slice, int]) -> T: return self.data[item] def __setitem__(self, key: int, value: T) -> None: self.data[key] = value def __delitem__(self, key: tp.Union[slice, int]) -> None: del self.data[key] class MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor): """ A dict that is also a monitor. Note that access to it's properties is not automatically synchronized, you got to invoke the monitor to implement an opportunistic locking of your own choice """ def __init__(self, *args, **kwargs): collections.UserDict.__init__(self, *args, **kwargs) Monitor.__init__(self) def __getitem__(self, item: K) -> V: return self.data[item] def __setitem__(self, key: K, value: V) -> None: self.data[key] = value def __delitem__(self, key: K) -> None: del self.data[key] def __copy__(self) -> 'MonitorDict': return MonitorDict(copy.copy(self.data)) def __deepcopy__(self, memo) -> 'MonitorDict': return MonitorDict(copy.deepcopy(self.data, memo=memo)) class MonitorSet(set, Monitor): """ A set that allows atomic insert-if-not-already-there operation """ def __init__(self, *args): super().__init__(*args) Monitor.__init__(self) def insert_and_check(self, item) -> bool: """ Perform an atomic insert if not already in set :param item: item to insert :return: whether the item was successfully inserted """ with Monitor.acquire(self): if item in self: return False self.add(item) return True
flexible
{ "blob_id": "0528d7761cbbf3dbe881ff05b81060f3d97e7f6c", "index": 742, "step-1": "<mask token>\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n <mask token>\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n <mask token>\n <mask token>\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n", "step-2": "<mask token>\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) ->'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n 
\"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n", "step-3": "<mask token>\n\n\nclass RMonitor(Monitor):\n <mask token>\n\n def __init__(self):\n self._monitor_lock = threading.RLock()\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) ->'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) ->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n", "step-4": "<mask token>\n\n\nclass RMonitor(Monitor):\n \"\"\"\n Monitor, but using an reentrant lock instead of a normal one\n \"\"\"\n\n def __init__(self):\n self._monitor_lock = threading.RLock()\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) ->'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) 
->T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) ->None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) ->V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) ->None:\n self.data[key] = value\n\n def __delitem__(self, key: K) ->None:\n del self.data[key]\n\n def __copy__(self) ->'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) ->'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) ->bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n", "step-5": "import collections\nimport copy\nimport threading\nimport typing as tp\n\nfrom ..decorators.decorators import wraps\n\nfrom ..typing import K, V, T\n\n\nclass Monitor:\n \"\"\"\n Base utility class for creating monitors (the synchronization thingies!)\n\n These are NOT re-entrant!\n\n Use it like that:\n\n >>> class MyProtectedObject(Monitor):\n >>> def __init__(self, *args, **kwargs):\n >>> Monitor.__init__(self)\n >>> ... do your job ..\n\n >>> @Monitor.synchronized\n >>> def function_that_needs_mutual_exclusion(self):\n >>> .. do your threadsafe jobs ..\n\n >>> def function_that_partially_needs_protection(self):\n >>> .. do your jobs ..\n >>> with Monitor.acquire(self):\n >>> .. do your threadsafe jobs ..\n >>> .. do your jobs ..\n >>> with self:\n >>> .. do your threadsafe jobs ..\n \"\"\"\n\n def __enter__(self) -> 'Monitor':\n self._monitor_lock.acquire()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb) -> bool:\n self._monitor_lock.release()\n return False\n\n def __init__(self):\n \"\"\"You need to invoke this at your constructor\n You can also use it to release locks of other objects.\"\"\"\n self._monitor_lock = threading.Lock() # type: threading.Lock\n\n @staticmethod\n def synchronize_on_attribute(attr_name: str):\n \"\"\"\n When a Monitor is an attribute of a class, and you have a method instance\n that you would like secure by acquiring that monitor, use this.\n\n The first argument taken by that method instance must be self.\n\n :param attr_name: name of the attribute that is the monitor\n \"\"\"\n\n def outer(fun):\n @wraps(fun)\n def method(self, *args, **kwargs):\n # noinspection PyProtectedMember\n with getattr(self, attr_name)._monitor_lock:\n return fun(self, *args, **kwargs)\n\n return method\n\n return outer\n\n @staticmethod\n def synchronized(fun: tp.Callable) -> tp.Callable:\n \"\"\"\n This is a decorator. Class method decorated with that will lock the\n global lock of given instance, making it threadsafe. 
Depending on\n usage pattern of your class and it's data semantics, your performance\n may vary\n \"\"\"\n\n @wraps(fun)\n def monitored(*args, **kwargs):\n # noinspection PyProtectedMember\n with args[0]._monitor_lock:\n return fun(*args, **kwargs)\n\n return monitored\n\n class release:\n \"\"\"\n Returns a context manager object that can release another object\n as long as that object is a monitor.\n\n Consider foo, which is a monitor. You have a protected function,\n but you feel that you can release it for a while as it would\n improve parallelism. You can use it as such:\n\n >>> @Monitor.synchronized\n >>> def protected_function(self):\n >>> .. do some stuff that needs mutual exclusion ..\n >>> with Monitor.release(self):\n >>> .. do some I/O that does not need mutual exclusion ..\n >>> .. back to protected stuff ..\n \"\"\"\n __slots__ = ('foo',)\n\n def __init__(self, foo: 'Monitor'):\n self.foo = foo\n\n def __enter__(self) -> None:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.release()\n\n def __exit__(self, e1, e2, e3) -> bool:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.acquire()\n return False\n\n class acquire:\n \"\"\"\n Returns a context manager object that can lock another object,\n as long as that object is a monitor.\n\n Consider foo, which is a monitor. If you needed to lock it from\n outside, you would do:\n\n >>> with Monitor.acquire(foo):\n >>> .. do operations on foo that need mutual exclusion ..\n \"\"\"\n __slots__ = ('foo',)\n\n def __init__(self, foo: 'Monitor'):\n self.foo = foo\n\n def __enter__(self) -> None:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.acquire()\n\n def __exit__(self, e1, e2, e3) -> bool:\n # noinspection PyProtectedMember\n self.foo._monitor_lock.release()\n return False\n\n @classmethod\n def synchronize_on(cls, monitor: 'Monitor') -> tp.Callable[[tp.Callable], tp.Callable]:\n \"\"\"\n A decorator for locking on non-self Monitor objects\n\n Use it like:\n\n >>> class MasterClass(Monitor):\n >>> def get_object(self):\n >>> class SlaveClass:\n >>> @Monitor.synchronize_on(self)\n >>> def get_object(self2):\n >>> ...\n >>> return SlaveClass\n \"\"\"\n\n def outer(fun):\n @wraps(fun)\n def inner(*args, **kwargs):\n with cls.acquire(monitor):\n return fun(*args, **kwargs)\n\n return inner\n\n return outer\n\n\nclass RMonitor(Monitor):\n \"\"\"\n Monitor, but using an reentrant lock instead of a normal one\n \"\"\"\n\n def __init__(self):\n self._monitor_lock = threading.RLock() # type: threading.RLock\n\n\nclass MonitorList(tp.Generic[T], collections.UserList, Monitor):\n \"\"\"\n A list that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args):\n collections.UserList.__init__(self, *args)\n Monitor.__init__(self)\n\n def __copy__(self) -> 'MonitorList':\n return MonitorList(copy.copy(self.data))\n\n def __deepcopy__(self, memo) -> 'MonitorList':\n return MonitorList(copy.deepcopy(self.data, memo=memo))\n\n def __getitem__(self, item: tp.Union[slice, int]) -> T:\n return self.data[item]\n\n def __setitem__(self, key: int, value: T) -> None:\n self.data[key] = value\n\n def __delitem__(self, key: tp.Union[slice, int]) -> None:\n del self.data[key]\n\n\nclass MonitorDict(tp.Generic[K, V], collections.UserDict, Monitor):\n \"\"\"\n A dict that is also a monitor.\n\n Note that access to it's properties is not automatically synchronized, 
you got to\n invoke the monitor to implement an opportunistic locking of your own choice\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n collections.UserDict.__init__(self, *args, **kwargs)\n Monitor.__init__(self)\n\n def __getitem__(self, item: K) -> V:\n return self.data[item]\n\n def __setitem__(self, key: K, value: V) -> None:\n self.data[key] = value\n\n def __delitem__(self, key: K) -> None:\n del self.data[key]\n\n def __copy__(self) -> 'MonitorDict':\n return MonitorDict(copy.copy(self.data))\n\n def __deepcopy__(self, memo) -> 'MonitorDict':\n return MonitorDict(copy.deepcopy(self.data, memo=memo))\n\n\nclass MonitorSet(set, Monitor):\n \"\"\"\n A set that allows atomic insert-if-not-already-there operation\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(*args)\n Monitor.__init__(self)\n\n def insert_and_check(self, item) -> bool:\n \"\"\"\n Perform an atomic insert if not already in set\n\n :param item: item to insert\n :return: whether the item was successfully inserted\n \"\"\"\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True\n", "step-ids": [ 17, 20, 22, 23, 33 ] }
[ 17, 20, 22, 23, 33 ]
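A minimal usage sketch for the monitor classes above (illustrative; it assumes the Monitor definition from the sample is in scope): a shared counter whose increments are serialized by @Monitor.synchronized.

import threading

class Counter(Monitor):  # Monitor as defined in the sample above
    def __init__(self):
        super().__init__()  # sets up self._monitor_lock
        self.value = 0

    @Monitor.synchronized
    def increment(self):
        self.value += 1  # runs while holding the instance's monitor lock

def worker(counter, n):
    for _ in range(n):
        counter.increment()

counter = Counter()
threads = [threading.Thread(target=worker, args=(counter, 1000)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter.value)  # 4000: no increment is ever interleaved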
ALPACA_KEY = 'Enter your alpaca key here'
ALPACA_SECRET_KEY = 'Enter your alpaca secret key here'
ALPACA_MARKET = 'enter alpaca market link here'
TWILIO_KEY = 'enter your twilio key here'
TWILIO_SECRET_KEY = 'enter your twilio secret key here'
YOUR_PHONE_NUMBER = 'Enter your phone number'
YOUR_TWILIO_NUMBER = 'Enter your twilio phone number'
normal
{ "blob_id": "10cb4b59d1e1e823c56ae5ceea0514b1c1904292", "index": 3769, "step-1": "<mask token>\n", "step-2": "ALPACA_KEY = 'Enter your apaca key here'\nALPACA_SECRET_KEY = 'Enter your apaca secret key here'\nALPACA_MARKET = 'enter alpaca market link here'\nTWILIO_KEY = 'enter your twilio key here'\nTWILIO_SECRET_KEY = 'enter your twilio secret key here'\nYOUR_PHONE_NUMBER = 'Enter your phone number'\nYOUR_TWILIO_NUMBER = 'Enter your twilio phone number'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
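The sample above hardcodes credential placeholders in source. A common alternative sketch: read the same settings from environment variables so secrets stay out of version control (the environment variable names simply mirror the constants and are assumptions):

import os

ALPACA_KEY = os.environ.get('ALPACA_KEY', '')
ALPACA_SECRET_KEY = os.environ.get('ALPACA_SECRET_KEY', '')
ALPACA_MARKET = os.environ.get('ALPACA_MARKET', '')
TWILIO_KEY = os.environ.get('TWILIO_KEY', '')
TWILIO_SECRET_KEY = os.environ.get('TWILIO_SECRET_KEY', '')
YOUR_PHONE_NUMBER = os.environ.get('YOUR_PHONE_NUMBER', '')
YOUR_TWILIO_NUMBER = os.environ.get('YOUR_TWILIO_NUMBER', '')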
import os import unittest import json from flask_sqlalchemy import SQLAlchemy from app import create_app from models import * from dotenv import load_dotenv, find_dotenv load_dotenv(find_dotenv()) # auth tokens should be updated before running tests, # make sure update the tokens in setup.sh # read the README to know more details CASTING_ASSISTANT_TOKEN = os.environ.get( "CASTING_ASSISTANT_TOKEN", "abc123abc1234" ) CASTING_DIRECTOR_TOKEN = os.environ.get( "CASTING_DIRECTOR_TOKEN", "abc123abc1234" ) EXECUTIVE_PRODUCER_TOKEN = os.environ.get( "EXECUTIVE_PRODUCER_TOKEN", "abc123abc1234" ) class CastingAgencyTestCase(unittest.TestCase): """This class has the test cases for casting agency web app endpoints""" def setUp(self): """Define test variables and initialize app.""" self.app = create_app() self.client = self.app.test_client self.database_name = os.environ.get( "TEST_DATABASE_NAME", "abc123abc1234" ) self.database_path = "postgres://postgres:postgres@{}/{}".format( "localhost:5432", self.database_name ) setup_db(self.app, self.database_path) # drop db, create and populate with test data setup_db_for_test() self.casting_assistant_auth_header = { "Authorization": "Bearer " + CASTING_ASSISTANT_TOKEN } self.casting_director_auth_header = { "Authorization": "Bearer " + CASTING_DIRECTOR_TOKEN } self.executive_producer_auth_header = { "Authorization": "Bearer " + EXECUTIVE_PRODUCER_TOKEN } self.create_actor_success = { "name": "Chris Hemsworth", "age": 37, "gender": "Male", } self.create_actor_fail = { "name": "Chris Evans", "age": 39, } self.create_movie_success = { "title": "Captain America: Civil War", "release_date": "12/04/2016", "actors_ids": [1, 2, 3], } self.create_movie_fail_1 = { "title": "Avenger: Infinity War", } self.create_movie_fail_2 = { "title": "Avenger: Infinity War", "release_date": "27/04/2018", "actors_ids": [], } self.create_movie_fail_3 = { "title": "Avenger: Infinity War", "release_date": "27/04/2018", "actors_ids": [100], } # binds the app to the current context with self.app.app_context(): self.db = SQLAlchemy() self.db.init_app(self.app) # create all tables self.db.create_all() # test get actors endpoint def test_get_actors(self): res = self.client().get( "/actors", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["actors"])) # test create actor endpoint with casting director auth token def test_create_actors_success_director(self): res = self.client().post( "/actors", headers=self.casting_director_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["actor"])) # test create actor endpoint with executive producer auth token def test_create_actors_success_producer(self): res = self.client().post( "/actors", headers=self.executive_producer_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["actor"])) # create actor fails due authentication failure with casting # assistant auth token def test_create_actors_401_failure_assistant(self): res = self.client().post( "/actors", headers=self.casting_assistant_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) 
self.assertEqual(res_data["message"], "Permission missing.") # create actor fails due to incomplete input def test_422_if_create_actor_fails(self): res = self.client().post( "/actors", headers=self.executive_producer_auth_header, json=self.create_actor_fail, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 422) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "unprocessable") # test update actors with executive producer auth token def test_update_actors_success_producer(self): res = self.client().patch( "/actors/1", headers=self.executive_producer_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["actor"])) # test update actors with casting director auth token def test_update_actors_success_director(self): res = self.client().patch( "/actors/1", headers=self.casting_director_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["actor"])) # update actor fails due authentication failure # with casting assitant auth token def test_update_actors_401_failure_assistant(self): res = self.client().patch( "/actors/1", headers=self.casting_assistant_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # test update actor faiure if actor with id doesnot # exists in database def test_update_actors_404_failure(self): res = self.client().patch( "/actors/100", headers=self.casting_director_auth_header, json=self.create_actor_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # test successfull delete actor with executive producer auth token def test_delete_actors_success_producer(self): res = self.client().delete( "/actors/1", headers=self.executive_producer_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertEqual(res_data["actor_id"], 1) # test successfull delete actor with casting director auth token def test_delete_actors_success_director(self): res = self.client().delete( "/actors/1", headers=self.casting_director_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertEqual(res_data["actor_id"], 1) # delete actor fails due authentication failure # with casting director auth token def test_delete_actors_401_failure_assistant(self): res = self.client().delete( "/actors/1", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # delete actor failure if actor with input # id doesnot exits def test_delete_actors_404_failure(self): res = self.client().delete( "/actors/100", headers=self.casting_director_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # test get movie endpoint def test_get_movies(self): 
res = self.client().get( "/movies", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["movies"])) # test create movie authentication failure # with casting director auth token def test_create_movies_401_failure_director(self): res = self.client().post( "/movies", headers=self.casting_director_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # test create movies success with executive producer # auth token def test_create_movies_success_producer(self): res = self.client().post( "/movies", headers=self.executive_producer_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["movie"])) # create actor fails due authentication failure # with casting assistant auth token def test_create_movies_401_failure_assistant(self): res = self.client().post( "/movies", headers=self.casting_assistant_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # create actor fails due to incomplete input def test_422_create_movie_fails_incomplete_info(self): res = self.client().post( "/movies", headers=self.executive_producer_auth_header, json=self.create_movie_fail_1, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 422) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "unprocessable") # create movie fails due to incomplete input, no input actor ids def test_422_create_movie_fails_no_actor_input_info(self): res = self.client().post( "/movies", headers=self.executive_producer_auth_header, json=self.create_movie_fail_2, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 422) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "unprocessable") # create movie fails due to wrong actor id input def test_404_create_movie_fails_wrong_actor_id(self): res = self.client().post( "/movies", headers=self.executive_producer_auth_header, json=self.create_movie_fail_3, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # test update movie success with executive producer # auth token def test_update_movies_success_producer(self): res = self.client().patch( "/movies/1", headers=self.executive_producer_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["movie"])) # test update movies success with casting # director auth token def test_update_movies_success_director(self): res = self.client().patch( "/movies/1", headers=self.casting_director_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["movie"])) # update actor fails due authentication failure # with casting assitant auth token def 
test_update_movies_401_failure_assistant(self): res = self.client().patch( "/movies/1", headers=self.casting_assistant_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # test update movies failure if movie with # input id does not exists def test_update_movies_404_failure(self): res = self.client().patch( "/movies/100", headers=self.casting_director_auth_header, json=self.create_movie_success, ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # test delete movies success with executive producer # auth token def test_delete_movies_success_producer(self): res = self.client().delete( "/movies/1", headers=self.executive_producer_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertEqual(res_data["movie_id"], 1) # test delete movies failure with casting director # auth token def test_delete_movies_401_failure_director(self): res = self.client().delete( "/movies/1", headers=self.casting_director_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # test delete actor fails due authentication failure # with casting assitant auth token def test_delete_actors_401_failure_assistant(self): res = self.client().delete( "/movies/1", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 401) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "Permission missing.") # test delete actor failure if actor with input id # doesnot exists def test_delete_actors_404_failure(self): res = self.client().delete( "/movies/100", headers=self.executive_producer_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # test get actor by movies success def test_get_actors_by_movies(self): res = self.client().get( "/movies/1/actors", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["actors"])) # test get actor by movies failure if movie # with input id does not exits def test_404_get_actors_by_movies(self): res = self.client().get( "/movies/100/actors", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # test get movies by actor success def test_get_movies_by_actors(self): res = self.client().get( "/actors/1/movies", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) self.assertEqual(res.status_code, 200) self.assertEqual(res_data["success"], True) self.assertTrue(len(res_data["movies"])) # test get movies by actor failure if actor # with input id does not exists def test_404_get_movies_by_actors(self): res = self.client().get( "/actors/100/movies", headers=self.casting_assistant_auth_header ) res_data = json.loads(res.data) 
self.assertEqual(res.status_code, 404) self.assertEqual(res_data["success"], False) self.assertEqual(res_data["message"], "resource not found") # Make the tests conveniently executable if __name__ == "__main__": unittest.main()
normal
{ "blob_id": "bae4eb94d561f7aa810718840ff7c2de52cb0d6f", "index": 3228, "step-1": "<mask token>\n\n\nclass CastingAgencyTestCase(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = os.environ.get('TEST_DATABASE_NAME',\n 'abc123abc1234')\n self.database_path = 'postgres://postgres:postgres@{}/{}'.format(\n 'localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n setup_db_for_test()\n self.casting_assistant_auth_header = {'Authorization': 'Bearer ' +\n CASTING_ASSISTANT_TOKEN}\n self.casting_director_auth_header = {'Authorization': 'Bearer ' +\n CASTING_DIRECTOR_TOKEN}\n self.executive_producer_auth_header = {'Authorization': 'Bearer ' +\n EXECUTIVE_PRODUCER_TOKEN}\n self.create_actor_success = {'name': 'Chris Hemsworth', 'age': 37,\n 'gender': 'Male'}\n self.create_actor_fail = {'name': 'Chris Evans', 'age': 39}\n self.create_movie_success = {'title': 'Captain America: Civil War',\n 'release_date': '12/04/2016', 'actors_ids': [1, 2, 3]}\n self.create_movie_fail_1 = {'title': 'Avenger: Infinity War'}\n self.create_movie_fail_2 = {'title': 'Avenger: Infinity War',\n 'release_date': '27/04/2018', 'actors_ids': []}\n self.create_movie_fail_3 = {'title': 'Avenger: Infinity War',\n 'release_date': '27/04/2018', 'actors_ids': [100]}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n <mask token>\n <mask token>\n\n def test_create_actors_success_producer(self):\n res = self.client().post('/actors', headers=self.\n executive_producer_auth_header, json=self.create_actor_success)\n res_data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res_data['success'], True)\n self.assertTrue(len(res_data['actor']))\n\n def test_create_actors_401_failure_assistant(self):\n res = self.client().post('/actors', headers=self.\n casting_assistant_auth_header, json=self.create_actor_success)\n res_data = json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n self.assertEqual(res_data['success'], False)\n self.assertEqual(res_data['message'], 'Permission missing.')\n <mask token>\n\n def test_update_actors_success_producer(self):\n res = self.client().patch('/actors/1', headers=self.\n executive_producer_auth_header, json=self.create_actor_success)\n res_data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res_data['success'], True)\n self.assertTrue(len(res_data['actor']))\n\n def test_update_actors_success_director(self):\n res = self.client().patch('/actors/1', headers=self.\n casting_director_auth_header, json=self.create_actor_success)\n res_data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res_data['success'], True)\n self.assertTrue(len(res_data['actor']))\n\n def test_update_actors_401_failure_assistant(self):\n res = self.client().patch('/actors/1', headers=self.\n casting_assistant_auth_header, json=self.create_actor_success)\n res_data = json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n self.assertEqual(res_data['success'], False)\n self.assertEqual(res_data['message'], 'Permission missing.')\n <mask token>\n\n def test_delete_actors_success_producer(self):\n res = self.client().delete('/actors/1', headers=self.\n executive_producer_auth_header)\n res_data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n 
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy

from app import create_app
from models import *
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())

# auth tokens should be updated before running tests;
# make sure to update the tokens in setup.sh
# read the README to know more details
CASTING_ASSISTANT_TOKEN = os.environ.get(
    "CASTING_ASSISTANT_TOKEN",
    "abc123abc1234"
)

CASTING_DIRECTOR_TOKEN = os.environ.get(
    "CASTING_DIRECTOR_TOKEN",
    "abc123abc1234"
)

EXECUTIVE_PRODUCER_TOKEN = os.environ.get(
    "EXECUTIVE_PRODUCER_TOKEN",
    "abc123abc1234"
)


class CastingAgencyTestCase(unittest.TestCase):
    """This class has the test cases for the casting agency web app endpoints"""

    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = os.environ.get(
            "TEST_DATABASE_NAME",
            "abc123abc1234"
        )
        self.database_path = "postgres://postgres:postgres@{}/{}".format(
            "localhost:5432", self.database_name
        )
        setup_db(self.app, self.database_path)

        # drop db, create and populate with test data
        setup_db_for_test()

        self.casting_assistant_auth_header = {
            "Authorization": "Bearer " + CASTING_ASSISTANT_TOKEN
        }

        self.casting_director_auth_header = {
            "Authorization": "Bearer " + CASTING_DIRECTOR_TOKEN
        }

        self.executive_producer_auth_header = {
            "Authorization": "Bearer " + EXECUTIVE_PRODUCER_TOKEN
        }

        self.create_actor_success = {
            "name": "Chris Hemsworth",
            "age": 37,
            "gender": "Male",
        }

        self.create_actor_fail = {
            "name": "Chris Evans",
            "age": 39,
        }

        self.create_movie_success = {
            "title": "Captain America: Civil War",
            "release_date": "12/04/2016",
            "actors_ids": [1, 2, 3],
        }

        self.create_movie_fail_1 = {
            "title": "Avenger: Infinity War",
        }

        self.create_movie_fail_2 = {
            "title": "Avenger: Infinity War",
            "release_date": "27/04/2018",
            "actors_ids": [],
        }

        self.create_movie_fail_3 = {
            "title": "Avenger: Infinity War",
            "release_date": "27/04/2018",
            "actors_ids": [100],
        }

        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()

    # test get actors endpoint
    def test_get_actors(self):
        res = self.client().get(
            "/actors",
            headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["actors"]))

    # test create actor endpoint with casting director auth token
    def test_create_actors_success_director(self):
        res = self.client().post(
            "/actors",
            headers=self.casting_director_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["actor"]))

    # test create actor endpoint with executive producer auth token
    def test_create_actors_success_producer(self):
        res = self.client().post(
            "/actors",
            headers=self.executive_producer_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["actor"]))

    # create actor fails due to authentication failure with casting
    # assistant auth token
    def test_create_actors_401_failure_assistant(self):
        res = self.client().post(
            "/actors",
            headers=self.casting_assistant_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # create actor fails due to incomplete input
    def test_422_if_create_actor_fails(self):
        res = self.client().post(
            "/actors",
            headers=self.executive_producer_auth_header,
            json=self.create_actor_fail,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 422)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "unprocessable")

    # test update actors with executive producer auth token
    def test_update_actors_success_producer(self):
        res = self.client().patch(
            "/actors/1",
            headers=self.executive_producer_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["actor"]))

    # test update actors with casting director auth token
    def test_update_actors_success_director(self):
        res = self.client().patch(
            "/actors/1",
            headers=self.casting_director_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["actor"]))

    # update actor fails due to authentication failure
    # with casting assistant auth token
    def test_update_actors_401_failure_assistant(self):
        res = self.client().patch(
            "/actors/1",
            headers=self.casting_assistant_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # test update actor failure if no actor with this id
    # exists in the database
    def test_update_actors_404_failure(self):
        res = self.client().patch(
            "/actors/100",
            headers=self.casting_director_auth_header,
            json=self.create_actor_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")

    # test successful delete actor with executive producer auth token
    def test_delete_actors_success_producer(self):
        res = self.client().delete(
            "/actors/1", headers=self.executive_producer_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertEqual(res_data["actor_id"], 1)

    # test successful delete actor with casting director auth token
    def test_delete_actors_success_director(self):
        res = self.client().delete(
            "/actors/1", headers=self.casting_director_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertEqual(res_data["actor_id"], 1)

    # delete actor fails due to authentication failure
    # with casting assistant auth token
    def test_delete_actors_401_failure_assistant(self):
        res = self.client().delete(
            "/actors/1", headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # delete actor fails if no actor with the input id exists
    def test_delete_actors_404_failure(self):
        res = self.client().delete(
            "/actors/100", headers=self.casting_director_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")

    # test get movies endpoint
    def test_get_movies(self):
        res = self.client().get(
            "/movies",
            headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["movies"]))

    # test create movie authentication failure
    # with casting director auth token
    def test_create_movies_401_failure_director(self):
        res = self.client().post(
            "/movies",
            headers=self.casting_director_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # test create movies success with executive producer
    # auth token
    def test_create_movies_success_producer(self):
        res = self.client().post(
            "/movies",
            headers=self.executive_producer_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["movie"]))

    # create movie fails due to authentication failure
    # with casting assistant auth token
    def test_create_movies_401_failure_assistant(self):
        res = self.client().post(
            "/movies",
            headers=self.casting_assistant_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # create movie fails due to incomplete input
    def test_422_create_movie_fails_incomplete_info(self):
        res = self.client().post(
            "/movies",
            headers=self.executive_producer_auth_header,
            json=self.create_movie_fail_1,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 422)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "unprocessable")

    # create movie fails due to incomplete input, no input actor ids
    def test_422_create_movie_fails_no_actor_input_info(self):
        res = self.client().post(
            "/movies",
            headers=self.executive_producer_auth_header,
            json=self.create_movie_fail_2,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 422)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "unprocessable")

    # create movie fails due to wrong actor id input
    def test_404_create_movie_fails_wrong_actor_id(self):
        res = self.client().post(
            "/movies",
            headers=self.executive_producer_auth_header,
            json=self.create_movie_fail_3,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")

    # test update movie success with executive producer
    # auth token
    def test_update_movies_success_producer(self):
        res = self.client().patch(
            "/movies/1",
            headers=self.executive_producer_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["movie"]))

    # test update movie success with casting
    # director auth token
    def test_update_movies_success_director(self):
        res = self.client().patch(
            "/movies/1",
            headers=self.casting_director_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["movie"]))

    # update movie fails due to authentication failure
    # with casting assistant auth token
    def test_update_movies_401_failure_assistant(self):
        res = self.client().patch(
            "/movies/1",
            headers=self.casting_assistant_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # test update movie failure if no movie with the
    # input id exists
    def test_update_movies_404_failure(self):
        res = self.client().patch(
            "/movies/100",
            headers=self.casting_director_auth_header,
            json=self.create_movie_success,
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")

    # test delete movie success with executive producer
    # auth token
    def test_delete_movies_success_producer(self):
        res = self.client().delete(
            "/movies/1", headers=self.executive_producer_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertEqual(res_data["movie_id"], 1)

    # test delete movie failure with casting director
    # auth token
    def test_delete_movies_401_failure_director(self):
        res = self.client().delete(
            "/movies/1", headers=self.casting_director_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # delete movie fails due to authentication failure
    # with casting assistant auth token
    # (renamed from a duplicate test_delete_actors_401_failure_assistant,
    # which would otherwise silently shadow the actor test above)
    def test_delete_movies_401_failure_assistant(self):
        res = self.client().delete(
            "/movies/1", headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 401)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "Permission missing.")

    # delete movie fails if no movie with the input id exists
    # (renamed from a duplicate test_delete_actors_404_failure)
    def test_delete_movies_404_failure(self):
        res = self.client().delete(
            "/movies/100", headers=self.executive_producer_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")

    # test get actors by movie success
    def test_get_actors_by_movies(self):
        res = self.client().get(
            "/movies/1/actors", headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["actors"]))

    # test get actors by movie failure if no movie
    # with the input id exists
    def test_404_get_actors_by_movies(self):
        res = self.client().get(
            "/movies/100/actors", headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")

    # test get movies by actor success
    def test_get_movies_by_actors(self):
        res = self.client().get(
            "/actors/1/movies", headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 200)
        self.assertEqual(res_data["success"], True)
        self.assertTrue(len(res_data["movies"]))

    # test get movies by actor failure if no actor
    # with the input id exists
    def test_404_get_movies_by_actors(self):
        res = self.client().get(
            "/actors/100/movies", headers=self.casting_assistant_auth_header
        )
        res_data = json.loads(res.data)

        self.assertEqual(res.status_code, 404)
        self.assertEqual(res_data["success"], False)
        self.assertEqual(res_data["message"], "resource not found")


# Make the tests conveniently executable
if __name__ == "__main__":
    unittest.main()
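Because the module reads its role tokens and test database name at import time, they must be in the environment before the file is imported. A minimal run sketch, assuming the file is saved as test_app.py; the token values and the module name are hypothetical placeholders, not taken from the source:

# Minimal run sketch. Variable names come from the test module above; the
# values and the module name "test_app" are hypothetical placeholders.
import os
import unittest

os.environ.setdefault("TEST_DATABASE_NAME", "casting_agency_test")
os.environ.setdefault("CASTING_ASSISTANT_TOKEN", "<assistant-jwt>")
os.environ.setdefault("CASTING_DIRECTOR_TOKEN", "<director-jwt>")
os.environ.setdefault("EXECUTIVE_PRODUCER_TOKEN", "<producer-jwt>")

# unittest.main imports the module only after the environment is prepared.
unittest.main(module="test_app", exit=False)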
"""IDQ Importer Exporter This script defines Import and Export functions through which it can communicate with a Informatica Model Repository. It also provides some related functions, such as: - Create IDQ folder - Check in IDQ components Parts by Laurens Verhoeven Parts by Jac. Beekers @Version: 20190412.0 - JBE - Initial version to work with deploy lists @License: MIT """ # MIT License # # Copyright (c) 2019 Jac. Beekers # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import datetime import supporting, logging from cicd.informatica import buildCommand from cicd.informatica import executeInfacmd from cicd.informatica import infaConstants as constants logger = logging.getLogger(__name__) def import_infadeveloper(**KeyWordArguments): """Import IDQ Components""" KeyWordArguments["Tool"] = "Import" ImportCommand = buildCommand.build(**KeyWordArguments) result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT) return result def export_infadeveloper(**KeyWordArguments): thisproc = "export_infadeveloper" KeyWordArguments["Tool"] = "Export" ExportCommand = buildCommand.build(**KeyWordArguments) supporting.log(logger, logging.INFO, thisproc, "ExportCommand is >" + ExportCommand + "<.") result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT) return result def CreateFolder(**KeyWordArguments): """Create IDQ Folder""" KeyWordArguments["Tool"] = "CreateFolder" CreateFolder = buildCommand.build(**KeyWordArguments) output, error = executeInfacmd.execute(CreateFolder) return (output, error) def ListCheckedOutObjects(**KeyWordArguments): thisproc = "ListCheckedOutObjects" """ List Components that are currently checked out """ KeyWordArguments["Tool"] = "ListCheckOutObjects" ListCheckedOutCommand = buildCommand.build(**KeyWordArguments) output, error = executeInfacmd.execute(ListCheckedOutCommand) # The output is in the form of one object per line, with properties spearated by a comma + space. # To filter out irrelevant lines, such as "Command succesful", we keep only line that start with "MRS_PATH=" OutputLines = output.splitlines() OutputKeyValuePairLines = [Properties.split(", ") for Properties in OutputLines if Properties.startswith("MRS_PATH=")] # ObjectsOLD = [[KVPair.split("=", 1) for KVPair in Line] for Line in OutputKeyValuePairLines] # Each object is a dictionary, with properties as keys # Since the date field has a comma in it, its not parsed properly. 
For this reason we need the len == 2 filter # If the date is required, the parsing of the output should be adjusted Objects = [dict(KVPair.split("=") for KVPair in Line if len(KVPair.split("=")) == 2) for Line in OutputKeyValuePairLines] supporting.log(logger, logging.DEBUG, thisproc, output) return Objects def CheckIn(**KeyWordArguments): """Check-in IDQ Components""" KeyWordArguments["Tool"] = "CheckIn" CheckInCommand = buildCommand.build(**KeyWordArguments) output, error = executeInfacmd.execute(CheckInCommand) return (output, error) def CheckInMutiple(**KeyWordArguments): thisproc = "CheckInMultiple" """ Check in Multiple IDQ components """ for key, value in KeyWordArguments.items(): if key == "MultipleObjectPaths": ObjectPaths = KeyWordArguments["MultipleObjectPaths"] KeyWordArguments["Tool"] = "CheckIn" CheckInCommands = [] for ObjectPathName in ObjectPaths: KeyWordArguments["ObjectPathName"] = ObjectPathName CheckInCommands.append(buildCommand.build(**KeyWordArguments)) CheckInAllCommand = "\n".join(CheckInCommands) timebefore = datetime.datetime.now() output, error = executeInfacmd.execute(CheckInAllCommand) timeafter = datetime.datetime.now() duration = timeafter - timebefore supporting.log(logging.DEBUG, thisproc, "Infacmd took " + str(duration) + " seconds to check-in " + str(len(ObjectPaths)) + " objects") # output, error = (CheckInAllCommand, 0) return (output, error) def create_iar_file(**KeyWordArguments): thisproc = "create_iar_file" KeyWordArguments["Tool"] = "CreateIAR" create_command = buildCommand.build(**KeyWordArguments) supporting.log(logger, logging.INFO, thisproc, "Command is >" + create_command + "<.") result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT) return result def deploy_iar_file(**KeyWordArguments): thisproc = "deploy_iar_file" KeyWordArguments["Tool"] = "DeployIAR" deploy_command = buildCommand.build(**KeyWordArguments) supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.") result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT) return result def redeploy_iar_file(**KeyWordArguments): thisproc = "redeploy_iar_file" KeyWordArguments["Tool"] = "RedeployIAR" deploy_command = buildCommand.build(**KeyWordArguments) supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.") result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT) return result def stop_app(**KeyWordArguments): thisproc = "stop_app" KeyWordArguments["Tool"] = "StopApp" deploy_command = buildCommand.build(**KeyWordArguments) supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.") result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT) return result def set_app_privileges(**KeyWordArguments): thisproc = "set_app_privileges" KeyWordArguments["Tool"] = "AppPrivileges" deploy_command = buildCommand.build(**KeyWordArguments) supporting.log(logger, logging.INFO, thisproc, "Command is >" + deploy_command + "<.") result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT) return result
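The comma-and-space parsing in ListCheckedOutObjects is subtle, so here is a small standalone check of the same comprehensions against a fabricated infacmd output line. Only the "MRS_PATH=" prefix is taken from the source; the other property names and values are assumptions for illustration:

# Standalone check of the ListCheckedOutObjects parsing. The sample line is
# fabricated; property names other than MRS_PATH are assumptions.
sample_output = (
    "MRS_PATH=/Project/Folder/m_customer, OBJECT_TYPE=Mapping, "
    "CHECKED_OUT_BY=jdoe, DATE=Apr 12, 2019\n"
    "Command ran successfully."
)
OutputLines = sample_output.splitlines()
OutputKeyValuePairLines = [Properties.split(", ") for Properties in OutputLines
                           if Properties.startswith("MRS_PATH=")]
Objects = [dict(KVPair.split("=") for KVPair in Line if len(KVPair.split("=")) == 2)
           for Line in OutputKeyValuePairLines]
# The date is truncated to "Apr 12" and the trailing "2019" fragment is
# dropped by the len == 2 filter, which prints:
# [{'MRS_PATH': '/Project/Folder/m_customer', 'OBJECT_TYPE': 'Mapping',
#   'CHECKED_OUT_BY': 'jdoe', 'DATE': 'Apr 12'}]
print(Objects)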
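Each wrapper above only injects its own "Tool" key and forwards the remaining keyword arguments to buildCommand.build, which is not shown here, so the accepted keys are unknown. The call below is a hedged usage sketch in which every keyword name is an assumption:

# Hypothetical usage sketch: every keyword key below is an assumption, since
# the accepted keys are defined in buildCommand.build, not in this file.
result = export_infadeveloper(
    DomainName="InfaDomain",                  # assumed key
    MrsServiceName="ModelRepositoryService",  # assumed key
    ProjectName="DQ_Project",                 # assumed key
    ExportFilePath="/tmp/DQ_Project.xml",     # assumed key
)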
{ "blob_id": "09b14705a6905470058b5eecc6dd0bb214975c66", "index": 6408, "step-1": "<mask token>\n\n\ndef import_infadeveloper(**KeyWordArguments):\n \"\"\"Import IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'Import'\n ImportCommand = buildCommand.build(**KeyWordArguments)\n result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT)\n return result\n\n\ndef export_infadeveloper(**KeyWordArguments):\n thisproc = 'export_infadeveloper'\n KeyWordArguments['Tool'] = 'Export'\n ExportCommand = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'ExportCommand is >' +\n ExportCommand + '<.')\n result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT)\n return result\n\n\ndef CreateFolder(**KeyWordArguments):\n \"\"\"Create IDQ Folder\"\"\"\n KeyWordArguments['Tool'] = 'CreateFolder'\n CreateFolder = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CreateFolder)\n return output, error\n\n\ndef ListCheckedOutObjects(**KeyWordArguments):\n thisproc = 'ListCheckedOutObjects'\n \"\"\" List Components that are currently checked out \"\"\"\n KeyWordArguments['Tool'] = 'ListCheckOutObjects'\n ListCheckedOutCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(ListCheckedOutCommand)\n OutputLines = output.splitlines()\n OutputKeyValuePairLines = [Properties.split(', ') for Properties in\n OutputLines if Properties.startswith('MRS_PATH=')]\n Objects = [dict(KVPair.split('=') for KVPair in Line if len(KVPair.\n split('=')) == 2) for Line in OutputKeyValuePairLines]\n supporting.log(logger, logging.DEBUG, thisproc, output)\n return Objects\n\n\ndef CheckIn(**KeyWordArguments):\n \"\"\"Check-in IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CheckInCommand)\n return output, error\n\n\ndef CheckInMutiple(**KeyWordArguments):\n thisproc = 'CheckInMultiple'\n \"\"\" Check in Multiple IDQ components \"\"\"\n for key, value in KeyWordArguments.items():\n if key == 'MultipleObjectPaths':\n ObjectPaths = KeyWordArguments['MultipleObjectPaths']\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommands = []\n for ObjectPathName in ObjectPaths:\n KeyWordArguments['ObjectPathName'] = ObjectPathName\n CheckInCommands.append(buildCommand.build(**KeyWordArguments))\n CheckInAllCommand = '\\n'.join(CheckInCommands)\n timebefore = datetime.datetime.now()\n output, error = executeInfacmd.execute(CheckInAllCommand)\n timeafter = datetime.datetime.now()\n duration = timeafter - timebefore\n supporting.log(logging.DEBUG, thisproc, 'Infacmd took ' + str(duration) +\n ' seconds to check-in ' + str(len(ObjectPaths)) + ' objects')\n return output, error\n\n\ndef create_iar_file(**KeyWordArguments):\n thisproc = 'create_iar_file'\n KeyWordArguments['Tool'] = 'CreateIAR'\n create_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n create_command + '<.')\n result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT)\n return result\n\n\ndef deploy_iar_file(**KeyWordArguments):\n thisproc = 'deploy_iar_file'\n KeyWordArguments['Tool'] = 'DeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef 
redeploy_iar_file(**KeyWordArguments):\n thisproc = 'redeploy_iar_file'\n KeyWordArguments['Tool'] = 'RedeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\n<mask token>\n\n\ndef set_app_privileges(**KeyWordArguments):\n thisproc = 'set_app_privileges'\n KeyWordArguments['Tool'] = 'AppPrivileges'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n", "step-2": "<mask token>\n\n\ndef import_infadeveloper(**KeyWordArguments):\n \"\"\"Import IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'Import'\n ImportCommand = buildCommand.build(**KeyWordArguments)\n result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT)\n return result\n\n\ndef export_infadeveloper(**KeyWordArguments):\n thisproc = 'export_infadeveloper'\n KeyWordArguments['Tool'] = 'Export'\n ExportCommand = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'ExportCommand is >' +\n ExportCommand + '<.')\n result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT)\n return result\n\n\ndef CreateFolder(**KeyWordArguments):\n \"\"\"Create IDQ Folder\"\"\"\n KeyWordArguments['Tool'] = 'CreateFolder'\n CreateFolder = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CreateFolder)\n return output, error\n\n\ndef ListCheckedOutObjects(**KeyWordArguments):\n thisproc = 'ListCheckedOutObjects'\n \"\"\" List Components that are currently checked out \"\"\"\n KeyWordArguments['Tool'] = 'ListCheckOutObjects'\n ListCheckedOutCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(ListCheckedOutCommand)\n OutputLines = output.splitlines()\n OutputKeyValuePairLines = [Properties.split(', ') for Properties in\n OutputLines if Properties.startswith('MRS_PATH=')]\n Objects = [dict(KVPair.split('=') for KVPair in Line if len(KVPair.\n split('=')) == 2) for Line in OutputKeyValuePairLines]\n supporting.log(logger, logging.DEBUG, thisproc, output)\n return Objects\n\n\ndef CheckIn(**KeyWordArguments):\n \"\"\"Check-in IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CheckInCommand)\n return output, error\n\n\ndef CheckInMutiple(**KeyWordArguments):\n thisproc = 'CheckInMultiple'\n \"\"\" Check in Multiple IDQ components \"\"\"\n for key, value in KeyWordArguments.items():\n if key == 'MultipleObjectPaths':\n ObjectPaths = KeyWordArguments['MultipleObjectPaths']\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommands = []\n for ObjectPathName in ObjectPaths:\n KeyWordArguments['ObjectPathName'] = ObjectPathName\n CheckInCommands.append(buildCommand.build(**KeyWordArguments))\n CheckInAllCommand = '\\n'.join(CheckInCommands)\n timebefore = datetime.datetime.now()\n output, error = executeInfacmd.execute(CheckInAllCommand)\n timeafter = datetime.datetime.now()\n duration = timeafter - timebefore\n supporting.log(logging.DEBUG, thisproc, 'Infacmd took ' + str(duration) +\n ' seconds to check-in ' + str(len(ObjectPaths)) + ' objects')\n return output, error\n\n\ndef create_iar_file(**KeyWordArguments):\n thisproc = 
'create_iar_file'\n KeyWordArguments['Tool'] = 'CreateIAR'\n create_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n create_command + '<.')\n result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT)\n return result\n\n\ndef deploy_iar_file(**KeyWordArguments):\n thisproc = 'deploy_iar_file'\n KeyWordArguments['Tool'] = 'DeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef redeploy_iar_file(**KeyWordArguments):\n thisproc = 'redeploy_iar_file'\n KeyWordArguments['Tool'] = 'RedeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef stop_app(**KeyWordArguments):\n thisproc = 'stop_app'\n KeyWordArguments['Tool'] = 'StopApp'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef set_app_privileges(**KeyWordArguments):\n thisproc = 'set_app_privileges'\n KeyWordArguments['Tool'] = 'AppPrivileges'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n", "step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\ndef import_infadeveloper(**KeyWordArguments):\n \"\"\"Import IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'Import'\n ImportCommand = buildCommand.build(**KeyWordArguments)\n result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT)\n return result\n\n\ndef export_infadeveloper(**KeyWordArguments):\n thisproc = 'export_infadeveloper'\n KeyWordArguments['Tool'] = 'Export'\n ExportCommand = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'ExportCommand is >' +\n ExportCommand + '<.')\n result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT)\n return result\n\n\ndef CreateFolder(**KeyWordArguments):\n \"\"\"Create IDQ Folder\"\"\"\n KeyWordArguments['Tool'] = 'CreateFolder'\n CreateFolder = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CreateFolder)\n return output, error\n\n\ndef ListCheckedOutObjects(**KeyWordArguments):\n thisproc = 'ListCheckedOutObjects'\n \"\"\" List Components that are currently checked out \"\"\"\n KeyWordArguments['Tool'] = 'ListCheckOutObjects'\n ListCheckedOutCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(ListCheckedOutCommand)\n OutputLines = output.splitlines()\n OutputKeyValuePairLines = [Properties.split(', ') for Properties in\n OutputLines if Properties.startswith('MRS_PATH=')]\n Objects = [dict(KVPair.split('=') for KVPair in Line if len(KVPair.\n split('=')) == 2) for Line in OutputKeyValuePairLines]\n supporting.log(logger, logging.DEBUG, thisproc, output)\n return Objects\n\n\ndef CheckIn(**KeyWordArguments):\n \"\"\"Check-in IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommand = 
buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CheckInCommand)\n return output, error\n\n\ndef CheckInMutiple(**KeyWordArguments):\n thisproc = 'CheckInMultiple'\n \"\"\" Check in Multiple IDQ components \"\"\"\n for key, value in KeyWordArguments.items():\n if key == 'MultipleObjectPaths':\n ObjectPaths = KeyWordArguments['MultipleObjectPaths']\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommands = []\n for ObjectPathName in ObjectPaths:\n KeyWordArguments['ObjectPathName'] = ObjectPathName\n CheckInCommands.append(buildCommand.build(**KeyWordArguments))\n CheckInAllCommand = '\\n'.join(CheckInCommands)\n timebefore = datetime.datetime.now()\n output, error = executeInfacmd.execute(CheckInAllCommand)\n timeafter = datetime.datetime.now()\n duration = timeafter - timebefore\n supporting.log(logging.DEBUG, thisproc, 'Infacmd took ' + str(duration) +\n ' seconds to check-in ' + str(len(ObjectPaths)) + ' objects')\n return output, error\n\n\ndef create_iar_file(**KeyWordArguments):\n thisproc = 'create_iar_file'\n KeyWordArguments['Tool'] = 'CreateIAR'\n create_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n create_command + '<.')\n result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT)\n return result\n\n\ndef deploy_iar_file(**KeyWordArguments):\n thisproc = 'deploy_iar_file'\n KeyWordArguments['Tool'] = 'DeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef redeploy_iar_file(**KeyWordArguments):\n thisproc = 'redeploy_iar_file'\n KeyWordArguments['Tool'] = 'RedeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef stop_app(**KeyWordArguments):\n thisproc = 'stop_app'\n KeyWordArguments['Tool'] = 'StopApp'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef set_app_privileges(**KeyWordArguments):\n thisproc = 'set_app_privileges'\n KeyWordArguments['Tool'] = 'AppPrivileges'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n", "step-4": "<mask token>\nimport datetime\nimport supporting, logging\nfrom cicd.informatica import buildCommand\nfrom cicd.informatica import executeInfacmd\nfrom cicd.informatica import infaConstants as constants\nlogger = logging.getLogger(__name__)\n\n\ndef import_infadeveloper(**KeyWordArguments):\n \"\"\"Import IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'Import'\n ImportCommand = buildCommand.build(**KeyWordArguments)\n result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT)\n return result\n\n\ndef export_infadeveloper(**KeyWordArguments):\n thisproc = 'export_infadeveloper'\n KeyWordArguments['Tool'] = 'Export'\n ExportCommand = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'ExportCommand 
is >' +\n ExportCommand + '<.')\n result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT)\n return result\n\n\ndef CreateFolder(**KeyWordArguments):\n \"\"\"Create IDQ Folder\"\"\"\n KeyWordArguments['Tool'] = 'CreateFolder'\n CreateFolder = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CreateFolder)\n return output, error\n\n\ndef ListCheckedOutObjects(**KeyWordArguments):\n thisproc = 'ListCheckedOutObjects'\n \"\"\" List Components that are currently checked out \"\"\"\n KeyWordArguments['Tool'] = 'ListCheckOutObjects'\n ListCheckedOutCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(ListCheckedOutCommand)\n OutputLines = output.splitlines()\n OutputKeyValuePairLines = [Properties.split(', ') for Properties in\n OutputLines if Properties.startswith('MRS_PATH=')]\n Objects = [dict(KVPair.split('=') for KVPair in Line if len(KVPair.\n split('=')) == 2) for Line in OutputKeyValuePairLines]\n supporting.log(logger, logging.DEBUG, thisproc, output)\n return Objects\n\n\ndef CheckIn(**KeyWordArguments):\n \"\"\"Check-in IDQ Components\"\"\"\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CheckInCommand)\n return output, error\n\n\ndef CheckInMutiple(**KeyWordArguments):\n thisproc = 'CheckInMultiple'\n \"\"\" Check in Multiple IDQ components \"\"\"\n for key, value in KeyWordArguments.items():\n if key == 'MultipleObjectPaths':\n ObjectPaths = KeyWordArguments['MultipleObjectPaths']\n KeyWordArguments['Tool'] = 'CheckIn'\n CheckInCommands = []\n for ObjectPathName in ObjectPaths:\n KeyWordArguments['ObjectPathName'] = ObjectPathName\n CheckInCommands.append(buildCommand.build(**KeyWordArguments))\n CheckInAllCommand = '\\n'.join(CheckInCommands)\n timebefore = datetime.datetime.now()\n output, error = executeInfacmd.execute(CheckInAllCommand)\n timeafter = datetime.datetime.now()\n duration = timeafter - timebefore\n supporting.log(logging.DEBUG, thisproc, 'Infacmd took ' + str(duration) +\n ' seconds to check-in ' + str(len(ObjectPaths)) + ' objects')\n return output, error\n\n\ndef create_iar_file(**KeyWordArguments):\n thisproc = 'create_iar_file'\n KeyWordArguments['Tool'] = 'CreateIAR'\n create_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n create_command + '<.')\n result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT)\n return result\n\n\ndef deploy_iar_file(**KeyWordArguments):\n thisproc = 'deploy_iar_file'\n KeyWordArguments['Tool'] = 'DeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef redeploy_iar_file(**KeyWordArguments):\n thisproc = 'redeploy_iar_file'\n KeyWordArguments['Tool'] = 'RedeployIAR'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef stop_app(**KeyWordArguments):\n thisproc = 'stop_app'\n KeyWordArguments['Tool'] = 'StopApp'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = 
executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n\n\ndef set_app_privileges(**KeyWordArguments):\n thisproc = 'set_app_privileges'\n KeyWordArguments['Tool'] = 'AppPrivileges'\n deploy_command = buildCommand.build(**KeyWordArguments)\n supporting.log(logger, logging.INFO, thisproc, 'Command is >' +\n deploy_command + '<.')\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n return result\n", "step-5": "\"\"\"IDQ Importer Exporter\n\nThis script defines Import and Export functions through which it can communicate with\na Informatica Model Repository.\n\nIt also provides some related functions, such as:\n\t- Create IDQ folder\n\t- Check in IDQ components\n\n Parts by Laurens Verhoeven\n Parts by Jac. Beekers\n @Version: 20190412.0 - JBE - Initial version to work with deploy lists\n @License: MIT\n\"\"\"\n\n# MIT License\n#\n# Copyright (c) 2019 Jac. Beekers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport datetime\nimport supporting, logging\n\nfrom cicd.informatica import buildCommand\nfrom cicd.informatica import executeInfacmd\nfrom cicd.informatica import infaConstants as constants\n\nlogger = logging.getLogger(__name__)\n\n\ndef import_infadeveloper(**KeyWordArguments):\n \"\"\"Import IDQ Components\"\"\"\n\n KeyWordArguments[\"Tool\"] = \"Import\"\n ImportCommand = buildCommand.build(**KeyWordArguments)\n\n result = executeInfacmd.execute(ImportCommand, constants.DEPLOYARTIFACT)\n\n return result\n\n\ndef export_infadeveloper(**KeyWordArguments):\n thisproc = \"export_infadeveloper\"\n\n KeyWordArguments[\"Tool\"] = \"Export\"\n ExportCommand = buildCommand.build(**KeyWordArguments)\n\n supporting.log(logger, logging.INFO, thisproc, \"ExportCommand is >\" + ExportCommand + \"<.\")\n result = executeInfacmd.execute(ExportCommand, constants.CREATEARTIFACT)\n\n return result\n\n\ndef CreateFolder(**KeyWordArguments):\n \"\"\"Create IDQ Folder\"\"\"\n\n KeyWordArguments[\"Tool\"] = \"CreateFolder\"\n\n CreateFolder = buildCommand.build(**KeyWordArguments)\n\n output, error = executeInfacmd.execute(CreateFolder)\n\n return (output, error)\n\n\ndef ListCheckedOutObjects(**KeyWordArguments):\n thisproc = \"ListCheckedOutObjects\"\n \"\"\" List Components that are currently checked out \"\"\"\n\n KeyWordArguments[\"Tool\"] = \"ListCheckOutObjects\"\n ListCheckedOutCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(ListCheckedOutCommand)\n\n # The output is in the form of one 
object per line, with properties spearated by a comma + space.\n # To filter out irrelevant lines, such as \"Command succesful\", we keep only line that start with \"MRS_PATH=\"\n OutputLines = output.splitlines()\n OutputKeyValuePairLines = [Properties.split(\", \") for Properties in OutputLines if\n Properties.startswith(\"MRS_PATH=\")]\n\n # ObjectsOLD = [[KVPair.split(\"=\", 1) for KVPair in Line] for Line in OutputKeyValuePairLines]\n\n # Each object is a dictionary, with properties as keys\n # Since the date field has a comma in it, its not parsed properly. For this reason we need the len == 2 filter\n # If the date is required, the parsing of the output should be adjusted\n Objects = [dict(KVPair.split(\"=\") for KVPair in Line if len(KVPair.split(\"=\")) == 2) for Line in\n OutputKeyValuePairLines]\n\n supporting.log(logger, logging.DEBUG, thisproc, output)\n\n return Objects\n\n\ndef CheckIn(**KeyWordArguments):\n \"\"\"Check-in IDQ Components\"\"\"\n\n KeyWordArguments[\"Tool\"] = \"CheckIn\"\n CheckInCommand = buildCommand.build(**KeyWordArguments)\n output, error = executeInfacmd.execute(CheckInCommand)\n\n return (output, error)\n\n\ndef CheckInMutiple(**KeyWordArguments):\n thisproc = \"CheckInMultiple\"\n \"\"\" Check in Multiple IDQ components \"\"\"\n for key, value in KeyWordArguments.items():\n if key == \"MultipleObjectPaths\":\n ObjectPaths = KeyWordArguments[\"MultipleObjectPaths\"]\n\n KeyWordArguments[\"Tool\"] = \"CheckIn\"\n\n CheckInCommands = []\n for ObjectPathName in ObjectPaths:\n KeyWordArguments[\"ObjectPathName\"] = ObjectPathName\n CheckInCommands.append(buildCommand.build(**KeyWordArguments))\n\n CheckInAllCommand = \"\\n\".join(CheckInCommands)\n\n timebefore = datetime.datetime.now()\n output, error = executeInfacmd.execute(CheckInAllCommand)\n timeafter = datetime.datetime.now()\n duration = timeafter - timebefore\n\n supporting.log(logging.DEBUG, thisproc,\n \"Infacmd took \" + str(duration) + \" seconds to check-in \" + str(len(ObjectPaths)) + \" objects\")\n\n # output, error = (CheckInAllCommand, 0)\n\n return (output, error)\n\n\ndef create_iar_file(**KeyWordArguments):\n thisproc = \"create_iar_file\"\n\n KeyWordArguments[\"Tool\"] = \"CreateIAR\"\n create_command = buildCommand.build(**KeyWordArguments)\n\n supporting.log(logger, logging.INFO, thisproc, \"Command is >\" + create_command + \"<.\")\n result = executeInfacmd.execute(create_command, constants.CREATEARTIFACT)\n\n return result\n\n\ndef deploy_iar_file(**KeyWordArguments):\n thisproc = \"deploy_iar_file\"\n\n KeyWordArguments[\"Tool\"] = \"DeployIAR\"\n deploy_command = buildCommand.build(**KeyWordArguments)\n\n supporting.log(logger, logging.INFO, thisproc, \"Command is >\" + deploy_command + \"<.\")\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n\n return result\n\n\ndef redeploy_iar_file(**KeyWordArguments):\n thisproc = \"redeploy_iar_file\"\n\n KeyWordArguments[\"Tool\"] = \"RedeployIAR\"\n deploy_command = buildCommand.build(**KeyWordArguments)\n\n supporting.log(logger, logging.INFO, thisproc, \"Command is >\" + deploy_command + \"<.\")\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n\n return result\n\n\ndef stop_app(**KeyWordArguments):\n thisproc = \"stop_app\"\n\n KeyWordArguments[\"Tool\"] = \"StopApp\"\n deploy_command = buildCommand.build(**KeyWordArguments)\n\n supporting.log(logger, logging.INFO, thisproc, \"Command is >\" + deploy_command + \"<.\")\n result = executeInfacmd.execute(deploy_command, 
constants.DEPLOYARTIFACT)\n\n return result\n\ndef set_app_privileges(**KeyWordArguments):\n thisproc = \"set_app_privileges\"\n\n KeyWordArguments[\"Tool\"] = \"AppPrivileges\"\n deploy_command = buildCommand.build(**KeyWordArguments)\n\n supporting.log(logger, logging.INFO, thisproc, \"Command is >\" + deploy_command + \"<.\")\n result = executeInfacmd.execute(deploy_command, constants.DEPLOYARTIFACT)\n\n return result\n", "step-ids": [ 10, 11, 12, 13, 14 ] }
[ 10, 11, 12, 13, 14 ]
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http import JsonResponse, Http404
from .serializers import *
from .models import *
from .filter import *
from rest_framework import generics
from rest_framework.filters import SearchFilter, OrderingFilter


# Create your views here.
@csrf_exempt
def TBGRApi(request, tbgrno=0):
    if request.method == 'GET':
        tbgrs = TBGR.objects.all()
        tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
        return JsonResponse(tbgrs_serializer.data, safe=False)

    elif request.method == 'POST':
        tbgr_data = JSONParser().parse(request)
        tbgr_serializer = TBGRSerializer(data=tbgr_data)
        if tbgr_serializer.is_valid():
            tbgr_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False)

    elif request.method == 'PUT':
        tbgr_data = JSONParser().parse(request)
        tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
        tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
        if tbgr_serializer.is_valid():
            tbgr_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)

    elif request.method == 'DELETE':
        tbgr = TBGR.objects.get(tbgrno=tbgrno)
        tbgr.delete()
        return JsonResponse("Deleted Successfully!!", safe=False)


@csrf_exempt
def BoardApi(request):
    if request.method == 'GET':
        boards = Board.objects.all()
        boards_serializer = BoardSerializer(boards, many=True)
        return JsonResponse(boards_serializer.data, safe=False)


@csrf_exempt
def VillageApi(request, villageid=0):
    if request.method == 'GET':
        villages = Village.objects.all()
        villages_serializer = VillageSerializer(villages, many=True)
        return JsonResponse(villages_serializer.data, safe=False)

    elif request.method == 'POST':
        village_data = JSONParser().parse(request)
        village_serializer = VillageSerializer(data=village_data)
        if village_serializer.is_valid():
            village_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False)

    elif request.method == 'PUT':
        village_data = JSONParser().parse(request)
        village = Village.objects.get(villageid=village_data['villageid'])
        village_serializer = VillageSerializer(village, data=village_data)
        if village_serializer.is_valid():
            village_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)

    elif request.method == 'DELETE':
        village = Village.objects.get(villageid=villageid)
        village.delete()
        return JsonResponse("Deleted Successfully!!", safe=False)


@csrf_exempt
def SlipApi(request, lotno=0):
    if request.method == 'GET':
        slips = Slip.objects.all()
        slips_serializer = SlipSerializer(slips, many=True)
        return JsonResponse(slips_serializer.data, safe=False)

    elif request.method == 'POST':
        slip_data = JSONParser().parse(request)
        slip_serializer = SlipSerializer(data=slip_data)
        if slip_serializer.is_valid():
            slip_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False)

    # Alternative POST handler: filter slips by lotno, tbgrno and grade
    # taken from the request body.
    # elif request.method == 'POST':
    #     slip_data = JSONParser().parse(request)
    #     slips = Slip.objects.all()
    #     if slip_data['lotno']:
    #         slips = slips.filter(lotno=slip_data['lotno'])
    #     if slip_data['tbgrno']:
    #         slips = slips.filter(tbgrno=slip_data['tbgrno'])
    #     if slip_data['grade']:
    #         slips = slips.filter(grade=slip_data['grade'])
    #     slips_serializer = SlipSerializer(slips, many=True)
    #     return JsonResponse(slips_serializer.data, safe=False)

    elif request.method == 'PUT':
        slip_data = JSONParser().parse(request)
        slip = Slip.objects.get(lotno=slip_data['lotno'])
        slip_serializer = SlipSerializer(slip, data=slip_data)
        if slip_serializer.is_valid():
            slip_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)

    elif request.method == 'DELETE':
        slip = Slip.objects.get(lotno=lotno)
        slip.delete()
        return JsonResponse("Deleted Successfully!!", safe=False)


@csrf_exempt
def GradeApi(request):
    if request.method == 'GET':
        grades = Grades.objects.all()
        grades_serializer = GradeSerializer(grades, many=True)
        return JsonResponse(grades_serializer.data, safe=False)


@csrf_exempt
def ContactApi(request, phone=0):
    if request.method == 'GET':
        contacts = Contacts.objects.all()
        contacts_serializer = ContactSerializer(contacts, many=True)
        return JsonResponse(contacts_serializer.data, safe=False)

    elif request.method == 'POST':
        contact_data = JSONParser().parse(request)
        contact_serializer = ContactSerializer(data=contact_data)
        if contact_serializer.is_valid():
            contact_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False, status=404)

    elif request.method == 'PUT':
        contact_data = JSONParser().parse(request)
        contact = Contacts.objects.get(phone=contact_data['phone'])
        contact_serializer = ContactSerializer(contact, data=contact_data)
        if contact_serializer.is_valid():
            contact_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)

    elif request.method == 'DELETE':
        contact = Contacts.objects.get(phone=phone)
        contact.delete()
        return JsonResponse("Deleted Successfully!!", safe=False)
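# ---------------------------------------------------------------------------
# A possible urls.py wiring for the views above (a sketch, not part of the
# original source; the route strings are assumptions). The keyword arguments
# (tbgrno, villageid, lotno, phone) are only consumed by the DELETE branches;
# GET, POST and PUT operate on the collection endpoints.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('tbgr/', views.TBGRApi),
#       path('tbgr/<int:tbgrno>', views.TBGRApi),
#       path('board/', views.BoardApi),
#       path('village/', views.VillageApi),
#       path('village/<int:villageid>', views.VillageApi),
#       path('slip/', views.SlipApi),
#       path('slip/<int:lotno>', views.SlipApi),
#       path('grade/', views.GradeApi),
#       path('contact/', views.ContactApi),
#       path('contact/<int:phone>', views.ContactApi),
#   ]
# ---------------------------------------------------------------------------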
#!/usr/bin/env python

from __future__ import print_function, division, unicode_literals
import os
import sys
import json
import logging
import tempfile
import itertools
import traceback
import subprocess as sp
from os.path import basename
from datetime import datetime
from argparse import ArgumentParser, FileType

PREPROC_CMDS = {
    'exon': "awk '$3 == \"exon\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"exon\";print}}' > {output}",
    'gene': "awk '$3 == \"gene\"' {input[0]} | sort -k1,1 -k4,4n | mergeBed -i stdin | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"gene\";print}}' > {output}",
    'intron': "subtractBed -a {input[0]} -b {input[1]} | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF)=\"intron\";print}}' > {output}",
    'intergenic': "complementBed -i {input[0]} -g <(cut -f 1-2 {input[1]} | sort -k1,1) | awk 'BEGIN{{OFS=\"\\t\"}}{{$(NF+1)=\"intergenic\";print}}' > {output}"
}


def strfdelta(tdelta, fmt):
    d = {"days": tdelta.days}
    d["hours"], rem = divmod(tdelta.seconds, 3600)
    d["minutes"], d["seconds"] = divmod(rem, 60)
    return fmt.format(**d)


def preprocess(element, inputs=None):
    '''element can be one of <gene> <exon> <intron> <intergenic>'''
    log = logging.getLogger('gencov')
    element_bed = tempfile.mkstemp(suffix='.bed')[1]
    if not inputs:
        inputs = [args.annotation]
    else:
        inputs = inputs[element]
    command = PREPROC_CMDS[element].format(input=inputs, output=element_bed)

    log.debug(command)
    proc = sp.Popen(command, shell=True, executable='/bin/bash', stderr=sp.PIPE)
    err_msg = proc.communicate()[1]
    if err_msg:
        raise IOError(err_msg)

    log.info("%s preprocessed" % element.title())
    return element_bed


def gtf_processing(genome=None, prefix='gencov'):
    """Annotation preprocessing. Provide a bed file with the
    following elements:

    - projected exons
    - projected genes
    - introns
    - intergenic regions

    """
    all_bed = prefix + ".all.bed"

    if not os.path.exists(all_bed) or os.stat(all_bed).st_size == 0:
        log.info("Preprocessing annotation...")
        features = ('exon', 'gene', 'intron', 'intergenic')
        merged_exons, merged_genes = map(preprocess, features[:2])
        ins = {
            'intron': [merged_genes, merged_exons],
            'intergenic': [merged_genes, genome]
        }
        intron_bed, intergenic_bed = map(preprocess, features[2:], [ins, ins])

        log.info("Concatenate bed files for all elements...")
        with open(all_bed, 'w') as out_bed:
            cat_all(merged_exons, merged_genes, intron_bed, intergenic_bed, out_bed=out_bed)

        for f in (merged_exons, merged_genes, intron_bed, intergenic_bed):
            os.remove(f)

    return all_bed


def cat_all(*args, **kwargs):
    out_bed = kwargs.get('out_bed', sys.stdout)
    for bed in args:
        print(open(bed, 'r').read(), end='', file=out_bed)


def get_chromosomes(genome_file):
    with open(genome_file) as genome:
        chrs = [l.split()[0] for l in genome]
    return chrs


def process_bam(bam, all_elements, chrs=None, all_reads=False):
    if not os.path.exists(bam):
        raise IOError("Fail to open {0!r} for reading".format(bam))
    bai = "{0}.bai".format(bam)
    if chrs and not os.path.exists(bai):
        log.info("Indexing {0}...".format(bam))
        sp.call('samtools index {0}'.format(bam), shell=True)

    log.info('Processing {0}...'.format(bam))
    command = "samtools view -u"
    sam_filter = 4
    if not all_reads:
        sam_filter += 256
    command += " -F {0} {1}".format(str(sam_filter), bam)
    if chrs:
        command += " {0}".format(" ".join(chrs))
    command = "{0} | bamToBed -i stdin -tag NH -bed12 | intersectBed -a stdin -b {1} -split -wao".format(command, all_elements)
    log.debug(command)
    return sp.Popen(command, shell=True, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=1)


def update_counts(element, tot_counts, cont_counts, split_counts, is_split):
    elem = 'total'
    tot_counts[elem] = tot_counts.get(elem, 0) + 1
    if is_split:
        split_counts['total'] = split_counts.get('total', 0) + 1
        if len(element) > 1:
            if len(set(element)) == 1:
                elem = element[0]
            else:
                if 'intergenic' in element:
                    elem = 'others'
                else:
                    elem = 'exonic_intronic'
        else:
            elem = element[0]

        split_counts[elem] = split_counts.get(elem, 0) + 1

    else:
        cont_counts['total'] = cont_counts.get('total', 0) + 1
        if len(element) > 1:
            if 'intergenic' in element:
                elem = 'others'
            else:
                elem = 'exonic_intronic'
        else:
            elem = element[0]

        cont_counts[elem] = cont_counts.get(elem, 0) + 1


def count_features(bed, uniq=False):

    # Initialize
    n_skipped = {}
    newRead = False    # keep track of different reads
    prev_rid = None    # read id of the previous read
    is_split = False   # check if current read is a split
    element = []       # list with all elements intersecting the read
    cont_counts = {}   # Continuous read counts
    split_counts = {}  # Split read counts
    tot_counts = {}    # Total number of reads

    o = bed.stdout

    log.info("Compute genomic coverage...")

    # Iterate
    while True:
        try:
            line = o.next()
            if not line:
                n_skipped['empty'] = n_skipped.get('empty', 0) + 1
                continue
            if 'gene' in line:
                n_skipped['gene'] = n_skipped.get('gene', 0) + 1
                continue
            rchr, rstart, rend, rid, rflag, rstrand, rtstart, rtend, rrgb, rbcount, rbsizes, rbstarts, achr, astart, aend, ael, covg = line.strip().split("\t")
            if uniq and int(rflag) != 1:
                n_skipped['non-uniq'] = n_skipped.get('non-uniq', 0) + 1
                continue
            newRead = (rid != prev_rid)
            if newRead and prev_rid is not None:
                update_counts(element, tot_counts, cont_counts, split_counts, is_split)
                # Re-Initialize the counters
                element = []
            element.append(ael)
            prev_rid = rid
            is_split = int(rbcount) > 1
        except StopIteration:
            update_counts(element, tot_counts, cont_counts, split_counts, is_split)
            break

    for k, v in n_skipped.iteritems():
        log.info("Skipped {1} {0} lines".format(k, v))

    return (tot_counts, cont_counts, split_counts)


def write_output(stats, out, output_format='tsv', json_indent=4):
    if not args.ID:
        args.ID = basename(args.bam)
    if output_format == 'tsv':
        for k, v in stats.iteritems():
            for k1, v1 in v.iteritems():
                line_array = [args.ID, k, str(k1), str(v1)]
                out.write("\t".join(line_array) + "\n")
    elif output_format == 'json':
        out.write('Total reads: {0}\n'.format(json.dumps(stats['total'], indent=json_indent)))
        out.write('Continuous reads: {0}\n'.format(json.dumps(stats['continuous'], indent=json_indent)))
        out.write('Split reads: {0}\n'.format(json.dumps(stats['split'], indent=json_indent)))


def main(args):
    bn_bam = os.path.basename(args.bam).rsplit(".", 1)[0]
    bn_gtf = os.path.basename(args.annotation).rsplit(".", 1)[0]
    start = datetime.now()
    all_elements = gtf_processing(genome=args.genome, prefix=bn_bam + "." + bn_gtf)
    chrs = None if args.all_chrs else get_chromosomes(args.genome)
    if args.uniq:
        args.all_reads = False
    bed = process_bam(args.bam, all_elements, chrs=chrs, all_reads=args.all_reads)
    read_type = "UNIQ" if args.uniq else "ALL" if args.all_reads else "PRIMARY"
    chroms = ", ".join(chrs) if chrs else "ALL"
    log.info("Chromosomes: {0}".format(str(chroms)))
    log.info("Mapped reads: {0}".format(str(read_type)))
    tot, cont, split = count_features(bed, uniq=args.uniq)
    stats_summary = {"total": tot, "continuous": cont, "split": split}
    write_output(stats_summary, args.output, output_format=args.output_format)
    end = datetime.now() - start
    log.info('DONE ({0})'.format(strfdelta(end, "{hours}h{minutes}m{seconds}s")))
    if not args.keep:
        os.remove(all_elements)


def parse_arguments(argv):
    """ Parsing arguments """
    parser = ArgumentParser(argv, description="Count the number of reads in genomic regions. NOTE: SAMtools and BEDtools must be installed")
    parser.add_argument("-a", "--annotation", type=str, help="gtf with all elements (genes, transcripts and exons)", required=True)
    parser.add_argument("-g", "--genome", type=str, help="genome chromosome sizes", required=True)
    parser.add_argument("-b", "--bam", type=str, help="bam file", required=True)
    parser.add_argument("-o", "--output", type=FileType('w'), default=sys.stdout, help="output file name")
    parser.add_argument("-I", "--ID", type=str, help="the ID of the experiment, from which the bam comes from")
    parser.add_argument("--keep", dest='keep', help="Do not delete the temporary files generated during the run", action='store_true', default=False)
    parser.add_argument("--uniq", dest='uniq', action='store_true', help="Only use uniquely mapped reads", default=False)
    parser.add_argument("--loglevel", dest='loglevel', help="Set the loglevel", default="info")
    parser.add_argument("--all-reads", dest='all_reads', action='store_true', help="Use all reads from the BAM file. Default: use primary alignments only ('samtools view -F 260')", default=False)
    parser.add_argument("--output-format", dest='output_format', help="Set the output format", default="tsv")
    parser.add_argument("--all-chromosomes", dest='all_chrs', action='store_true', help="Use all chromosomes from the BAM file header. Default: use only chromosomes in the genome index file.", default=False)
    return parser.parse_args()


def setup_logger():
    """ Logging setup """
    log = logging.getLogger("gencov")
    log.setLevel(logging.getLevelName(args.loglevel.upper()))
    ch = logging.StreamHandler()
    ch.setLevel(log.level)
    fmt = logging.Formatter('%(asctime)s - %(message)s', '%Y-%m-%d %H:%M:%S')
    ch.setFormatter(fmt)
    log.addHandler(ch)
    return log


if __name__ == "__main__":
    """
    Given a bam file, compute the read coverage for different genomic regions:

    - exons
    - introns
    - exon-intron junctions
    - intergenic

    *** ONLY PRIMARY alignments are used ***
    """
    try:
        args = parse_arguments(sys.argv)
        log = setup_logger()
        main(args)
        exit(0)
    except Exception, err:
        log.error("Error:")
        errinfo = traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)
        log.error("".join(errinfo))
        exit(1)
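# ---------------------------------------------------------------------------
# Example invocation (a sketch; file names are placeholders and the script
# name "gencov.py" is an assumption based on the logger name):
#
#   python gencov.py -a annotation.gtf -g genome.chrom.sizes -b sample.bam \
#       -o sample.coverage.tsv --uniq --loglevel debug
#
# -a/--annotation, -g/--genome and -b/--bam are required; --output-format
# accepts "tsv" (default) or "json", and --all-chromosomes disables the
# restriction to chromosomes listed in the genome index file.
# ---------------------------------------------------------------------------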
#!/usr/bin/env python
#lesson4.py

# See original source and C based tutorial at http://nehe.gamedev.net
#This code was created by Richard Campbell '99

#(ported to Python/PyOpenGL by John Ferguson 2000)
#John Ferguson at hakuin@voicenet.com

#Code ported for use with pyglet by Jess Hill (Jestermon) 2009
#jestermon.weebly.com
#jestermonster@gmail.com

#because these lessons sometimes need openGL GLUT, you need to install
#pyopengl as well as pyglet in order for these samples to work
#pyopengl ~ http://pyopengl.sourceforge.net
#pyglet ~ http://www.pyglet.org

import pyglet
from pyglet.gl import *
from pyglet.window import key
from OpenGL.GLUT import *  # <<<==Needed for GLUT calls
from objloader import *
from numpy import sin


##################################World
class World(pyglet.window.Window):
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    objfile1 = 'resource/predator.obj'
    objfile2 = 'resource/A10.obj'
    # objfile = 'resource/complex2.obj'
    obj = OBJ(objfile1)
    # obj2 = OBJ(objfile2)

    def __init__(self):
        config = Config(sample_buffers=1, samples=4,
                        depth_size=16, double_buffer=True)
        try:
            super(World, self).__init__(resizable=True, config=config)
        except:
            super(World, self).__init__(resizable=True)
        self.setup()

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def setup(self):
        self.width = 640
        self.height = 480
        self.rtri = 0.0   # (was global)
        self.rquad = 0.0  # (was global)
        self.InitGL(self.width, self.height)
        pyglet.clock.schedule_interval(self.update, 1/60.0)  # update at 60Hz

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def update(self, dt):
        self.DrawGLScene()

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def on_draw(self):
        self.DrawGLScene()

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def on_resize(self, w, h):
        self.ReSizeGLScene(w, h)

    def MakeTransparent(self):
        glDisable(GL_DEPTH_TEST)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # A general OpenGL initialization function. Sets all of the initial parameters.
    def InitGL(self, Width, Height):  # We call this right after our OpenGL window is created.
        glClearColor(0.0, 0.0, 0.0, 0.0)  # This Will Clear The Background Color To Black
        glClearDepth(1.0)                 # Enables Clearing Of The Depth Buffer
        glDepthFunc(GL_LESS)              # The Type Of Depth Test To Do
        glEnable(GL_DEPTH_TEST)           # Enables Depth Testing
        glShadeModel(GL_SMOOTH)           # Enables Smooth Color Shading
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()                  # Reset The Projection Matrix
        # Calculate The Aspect Ratio Of The Window
        #(pyglet initializes the screen so we ignore this call)
        #gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # for a realistic light diffusion effect
        specLight0 = [0.5, 0.5, 0.5, 1.0]
        glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0)
        glMaterialfv(GL_FRONT, GL_SHININESS, 10.0)
        glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))

        dens = 0.3
        glLightfv(GL_LIGHT0, GL_AMBIENT, (dens, dens, dens, 0.0))
        # glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.5, 0.5, 0.5, 0.0))

        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHTING)
        glEnable(GL_COLOR_MATERIAL)
        # glutFullScreenToggle()
        # self.MakeTransparent()

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
    def ReSizeGLScene(self, Width, Height):
        if Height == 0:  # Prevent A Divide By Zero If The Window Is Too Small
            Height = 1
        glViewport(0, 0, Width, Height)  # Reset The Current Viewport And Perspective Transformation
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

    def DrawHUD(self, basicT=(0, 0, 0)):
        # glMatrixMode(GL_PROJECTION)
        # glLoadIdentity()
        # glOrtho(0, 640, 480, 0, 0, 1)
        glMatrixMode(GL_MODELVIEW)
        # glTranslatef(0, 0, -30.0)
        pyglet.gl.glColor4f(0.0, 1, 0, 1.0)
        glEnable(GL_LINE_SMOOTH)
        glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
        glLineWidth(3)
        pyglet.graphics.draw(2, pyglet.gl.GL_LINES, ('v2i', (10, 15, 300, 305)))

        # glClear(GL_COLOR_BUFFER_BIT)
        glLoadIdentity()
        glTranslatef(1.0, 1.0, -6.0)

        # Draw a square (quadrilateral) rotated on the Y axis.
        glRotatef(self.rquad, 0.0, 1.0, 0.0)  # Rotate
        glColor3f(1.0, 1.0, 1.0)
        glPointSize(3.0)

        glBegin(GL_QUADS)            # Start drawing a 4 sided polygon
        glVertex3f(-1.0, 1.0, 0.0)   # Top Left
        glVertex3f(1.0, 1.0, 0.0)    # Top Right
        glVertex3f(1.0, -1.0, 0.0)   # Bottom Right
        glVertex3f(-1.0, -1.0, 0.0)  # Bottom Left
        glEnd()                      # We are done with the polygon

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The main drawing function.
    def DrawGLScene(self):
        # Clear The Screen And The Depth Buffer
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        basicT = (1, 1, 1)
        self.DrawHUD(basicT)

        glLoadIdentity()  # Reset The View
        glTranslatef(15.0, -5, -50.0)
        # glTranslatef(15.0, 2*sin(self.rquad/50.)-5, -50.0)
        glRotatef(20*sin(self.rquad/20.), 0.1, 0.1, -1.0)  # Rotate
        glCallList(self.obj.gl_list)

        # What values to use? Well, if you have a FAST machine and a FAST 3D Card, then
        # large values make an unpleasant display with flickering and tearing. I found that
        # smaller values work better, but this was based on my experience.
        #(2009.. 9 years after this code was written, this still applies.. unless you use)
        #(a timed display, as done here with pyglet.clock.schedule_interval(self.update, 1/60.0),
        # which updates at 60Hz)
        # self.rtri = self.rtri + 1.0  # Increase The Rotation Variable For The Triangle
        self.rquad = self.rquad + 1.3  # Increase The Rotation Variable For The Quad

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def on_key_press(self, symbol, modifiers):
        if symbol == key.ESCAPE:
            self.dispatch_event('on_close')
    # since this is double buffered, swap the buffers to display what just got drawn.
    #(pyglet provides the swap, so we don't call it here)


default_size = 1024, 768
screen_size1 = 640, 480
if __name__ == "__main__":
    window = World()
    window.set_location(10, 30)
    window.set_size(*screen_size1)
    # window.set_fullscreen(True)
    pyglet.app.run()
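
# Note: objloader.OBJ and the .obj models under resource/ are external
# dependencies of this lesson. Since only the .gl_list attribute is used above,
# a minimal stand-in (a hypothetical stub, not the real loader) could be:
#
#   class OBJ(object):
#       def __init__(self, filename):
#           self.gl_list = glGenLists(1)          # allocate a display list
#           glNewList(self.gl_list, GL_COMPILE)   # record nothing into it
#           glEndList()
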
"""
Sorting instances of a user-defined class.
"""
import operator


class User:
    def __init__(self, name, id):
        self.name = name
        self.id = id

    def __repr__(self):
        return 'User({},{})'.format(self.name, self.id)


def run():
    users = [User('wang', 1), User('zhao', 4), User('chen', 3), User('wang', 2)]
    # operator.attrgetter is comparatively fast and also works with min/max, etc.
    a = sorted(users, key=operator.attrgetter('id', 'name'))
    print(a)
    b = sorted(users, key=lambda r: (r.id, r.name))
    print(b)


if __name__ == '__main__':
    run()
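
# Expected output (both key functions sort by id, then name):
#   [User(wang,1), User(wang,2), User(chen,3), User(zhao,4)]
#   [User(wang,1), User(wang,2), User(chen,3), User(zhao,4)]
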
"""
Writes day of the week and time to a file.

Script written for crontab tutorial.

Author: Jessica Yung 2016

"""
import time

filename = "record_time.txt"

# Records time in format Sun 10:00:00
current_time = time.strftime('%a %H:%M:%S')

# Append output to file. 'a' is append mode.
with open(filename, 'a') as handle:
    # Write (Append) output to a line
    handle.write(str(current_time))
    # Newline to separate different lines of output
    handle.write('\n')
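
# Example crontab entry (an assumption for illustration; adjust the interpreter
# and script path to your system) that appends a timestamp every minute:
#
#   * * * * * /usr/bin/python /path/to/record_time.py
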
import cv2 as cv
import numpy as np
import time
from servo import *
from func import *
#import threading
#import dlib
# import socket
# import struct
# import pickle


def constrain(val, minv, maxv):
    return min(maxv, max(minv, val))

# PID gains for steering control
KP = 0.22
KI = 0
KD = 0.17
last = 0
integral = 0

# constants
SIZE = (400, 300)

RECT = np.float32([[0, 299],
                   [399, 299],
                   [399, 0],
                   [0, 0]])

TRAP = np.float32([[0, 299],
                   [399, 299],
                   [320, 200],
                   [80, 200]])
TRAPINT = np.array(TRAP, dtype=np.int32)

cap = cv.VideoCapture(0)

pi, ESC, STEER = setup_gpio()
p = False
control(pi, ESC, 1500, STEER, 90)
time.sleep(1)
timeout = 0
l = 1
r = 0

povor = 0
totl = 1

pid = 0

while True:
    try:
        ret, frame = cap.read()
        totl = frame.copy()
        #print(totl)
        cv.imwrite('/home/pi/imaaage1.jpg', totl)
        #print('totl ready')
        img = cv.resize(frame, SIZE)
        binary = binarize(img)

        perspective = trans_perspective(binary, TRAP, RECT, SIZE)
        cv.imwrite('/home/pi/imaaage2.jpg', perspective)

        if detect_stop(perspective):
            stop(pi, ESC)
            time.sleep(0.5)
            #control(pi, ESC, 1548, STEER, 90)
            #time.sleep(1)
            p = True
            continue

        left, right = find_left_right(perspective)

        if p:
            way = input("Which way, master?\n")  # prompt translated from Russian

            if way == "2":
                control(pi, ESC, 1545, STEER, 90)
                time.sleep(4)
                p = False
                continue
            elif way == "3":
                control(pi, ESC, 1545, STEER, 90)
                time.sleep(1)
                control(pi, ESC, 1545, STEER, 145)
                time.sleep(3.2)
                control(pi, ESC, 1545, STEER, 90)
                time.sleep(2)
            elif way == "1":
                ...

            p = False

        err = 0 - ((left + right) // 2 - 200)

        if abs(right - left) < 100:
            err = last
        #print(err)
        pid = KP * err + KD * (err - last) + KI * integral
        last = err
        integral += err
        integral = constrain(integral, -10, 10)
        control(pi, ESC, 1545, STEER, 90 + pid)
        print(pid)

        if detect_stop(perspective):
            stop(pi, ESC)
            time.sleep(3)
            control(pi, ESC, 1548, STEER, 90)
            time.sleep(1)
            p = True

        time.sleep(0.01)
        # if cv.waitKey(1) & 0xFF == ord('q'):
        #     break
    except KeyboardInterrupt as e:
        control(pi, ESC, 1500, STEER, 90)
        print(e)
        break

# cv.destroyAllWindows()
cap.release()
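
# The steering law above is a standard PID correction around the 90-degree
# center position: pid = KP*err + KD*(err - last) + KI*integral, with the
# steering angle set to 90 + pid. err is the lane-center offset in pixels;
# since KI is 0 here, the integral term is effectively disabled.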
err\n integral = constrain(integral, -10, 10)\n\n control(pi, ESC, 1545, STEER, 90 + pid)\n print(pid)\n\n if detect_stop(perspective):\n stop(pi, ESC)\n time.sleep(3)\n control(pi, ESC, 1548, STEER, 90)\n time.sleep(1)\n p = True\n\n\n\n time.sleep(0.01)\n\n # if cv.waitKey(1) & 0xFF == ord('q'):\n # break\n except KeyboardInterrupt as e:\n control(pi, ESC, 1500, STEER, 90)\n print(e)\n break\n\n# cv.destroyAllWindows()\ncap.release()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
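# A minimal, self-contained sketch of the PID steering update from the
# line-follower record above. The gains, the +/-10 integral clamp, and the
# update order match that record; the sample pixel errors are assumptions
# added for illustration.
KP, KI, KD = 0.22, 0.0, 0.17


def pid_step(err, last, integral):
    out = KP * err + KD * (err - last) + KI * integral
    integral = min(10, max(-10, integral + err))  # constrain(), as in the record
    return out, err, integral


last, integral = 0, 0
for err in [40, 25, 10, 0, -5]:  # error = 200 - lane midpoint, in pixels
    steer, last, integral = pid_step(err, last, integral)
    print('steering offset: {:+.2f}'.format(steer))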
import json
import struct
import pymel.core as pmc
import os.path


def exportVSSD(path, camName, wantTris=False, renderdata=None):
    # The scene description is written as JSON; heavy geometry goes into a
    # sidecar .bin file referenced by byte offsets.
    mainFileDict = {}
    mainFilePath = path
    mainFileStem = os.path.basename(path)[:-5]
    mainFileDir = os.path.dirname(path)

    resolution = pmc.ls('defaultResolution')[0]
    renderWidth = resolution.width.get()
    renderHeight = resolution.height.get()
    if renderdata is not None:
        mainFileDict['render'] = {'width': renderWidth,
                                  'height': renderHeight,
                                  'spp': renderdata['spp']}

    cam = pmc.ls(camName)[0].getShape()
    mainFileDict['camera'] = {'focal': cam.getFocalLength(),
                              'gate': cam.getVerticalFilmAperture(),
                              'aspect': renderWidth / renderHeight,
                              'eye': list(cam.getEyePoint(space='world')),
                              'up': list(cam.upDirection(space='world')),
                              'look': list(cam.viewDirection(space='world'))}

    bufPath = os.path.join(mainFileDir, '{}.bin'.format(mainFileStem))
    geomList = pmc.ls(type='mesh', visible=True)
    mainFileGeoms = []
    offset = 0
    with open(bufPath, 'wb') as bufFd:
        for geom in geomList:
            print('Processing {}...'.format(geom))
            smoothLevel = pmc.displaySmoothness(geom, q=True, po=0)[0]
            isSmooth = smoothLevel > 1
            print('Smooth level {}'.format(smoothLevel))
            # Packed binary buffers (Python 2 str concatenation).
            faceBuf = ''
            idxBuf = ''
            vtxBuf = ''
            nidxs = 0
            for face in geom.f:
                vtxidxs = face.getVertices()
                nvtxidxs = len(vtxidxs)
                if not isSmooth and wantTris:
                    if nvtxidxs > 3:
                        print('Non-triangulated face. Triangulate before exporting')
                        return
                else:
                    faceBuf += struct.pack('<I', nvtxidxs)
                nidxs += nvtxidxs
                for vtxidx in vtxidxs:
                    idxBuf += struct.pack('<I', vtxidx)
            for vertex in geom.vtx:
                p = vertex.getPosition('world')
                vtxBuf += struct.pack('<fff', p.x, p.y, p.z)
            hasCreases = False
            if isSmooth:
                # Subdivision surfaces: export edge creases alongside the cage.
                edges = geom.edges
                creaseIdxBuf = ''
                creaseValBuf = ''
                creases = pmc.modeling.polyCrease(edges, q=True, v=0)
                for e in range(0, len(edges)):
                    c = creases[e]
                    if c > 0:
                        hasCreases = True
                        vtxs = edges[e].connectedVertices()
                        creaseIdxBuf += struct.pack('<I', vtxs[0].index())
                        creaseIdxBuf += struct.pack('<I', vtxs[1].index())
                        creaseValBuf += struct.pack('<f', c)
            buffers = [(idxBuf, 'indices'), (vtxBuf, 'vertices')]
            if not wantTris:
                buffers += [(faceBuf, 'faces')]
            if hasCreases:
                buffers += [(creaseIdxBuf, 'creaseindices'),
                            (creaseValBuf, 'creasevalues')]
            buffersList = []
            for b in buffers:
                print('Writing buffer {}'.format(b[1]))
                bufFd.write(b[0])
                s = len(b[0])
                buffersList.append({'offset': offset, 'size': s, 'type': b[1]})
                offset += s
            sg = geom.connections(t='shadingEngine')[0]
            mat = sg.surfaceShader.connections()[0]
            albedo = mat.color.get()
            emittance = mat.incandescence.get()
            geomDict = {'triangles': wantTris,
                        'smooth': isSmooth,
                        'buffers': buffersList,
                        'material': {'albedo': list(albedo),
                                     'emittance': list(emittance)}}
            mainFileGeoms.append(geomDict)
    mainFileDict['geometries'] = mainFileGeoms
    mainFileJson = json.dumps(mainFileDict, indent=2)
    with open(mainFilePath, 'w') as fd:
        fd.write(mainFileJson)
    print('Done')
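# A hypothetical invocation, assuming the module is run inside Maya where
# pymel is available; the path, camera name, and spp value below are made-up
# examples, not part of the original module:
#
#     exportVSSD('/tmp/scene.vssd', 'persp', wantTris=False,
#                renderdata={'spp': 64})
#
# Since mainFileStem strips the last five characters of the path, the file is
# expected to carry a five-character suffix like '.vssd', and the geometry
# buffers land next to it as 'scene.bin'.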
flexible
{ "blob_id": "004a9cd0e459116bf3f88f3546ff4eded3dfb2a8", "index": 2512, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef exportVSSD(path, camName, wantTris=False, renderdata=None):\n mainFileDict = {}\n mainFilePath = path\n mainFileStem = os.path.basename(path)[:-5]\n mainFileDir = os.path.dirname(path)\n resolution = pmc.ls('defaultResolution')[0]\n renderWidth = resolution.width.get()\n renderHeight = resolution.height.get()\n if renderdata is not None:\n mainFileDict['render'] = {'width': renderWidth, 'height':\n renderHeight, 'spp': renderdata['spp']}\n cam = pmc.ls(camName)[0].getShape()\n mainFileDict['camera'] = {'focal': cam.getFocalLength(), 'gate': cam.\n getVerticalFilmAperture(), 'aspect': renderWidth / renderHeight,\n 'eye': list(cam.getEyePoint(space='world')), 'up': list(cam.\n upDirection(space='world')), 'look': list(cam.viewDirection(space=\n 'world'))}\n bufPath = os.path.join(mainFileDir, '{}.bin'.format(mainFileStem))\n geomList = pmc.ls(type='mesh', visible=True)\n mainFileGeoms = []\n offset = 0\n with open(bufPath, 'wb') as bufFd:\n for geom in geomList:\n print('Processing {}...'.format(geom))\n smoothLevel = pmc.displaySmoothness(geom, q=True, po=0)[0]\n isSmooth = smoothLevel > 1\n print('Smooth level {}'.format(smoothLevel))\n faceBuf = ''\n idxBuf = ''\n vtxBuf = ''\n nidxs = 0\n for face in geom.f:\n vtxidxs = face.getVertices()\n nvtxidxs = len(vtxidxs)\n if not isSmooth and wantTris:\n if nvtxidxs > 3:\n print(\n 'Non-triangulated face. Triangulate before exporting'\n )\n return\n else:\n faceBuf += struct.pack('<I', nvtxidxs)\n nidxs += nvtxidxs\n for vtxidx in vtxidxs:\n idxBuf += struct.pack('<I', vtxidx)\n for vertex in geom.vtx:\n p = vertex.getPosition('world')\n vtxBuf += struct.pack('<fff', p.x, p.y, p.z)\n hasCreases = False\n if isSmooth:\n edges = geom.edges\n creaseIdxBuf = ''\n creaseValBuf = ''\n creases = pmc.modeling.polyCrease(edges, q=True, v=0)\n for e in range(0, len(edges)):\n c = creases[e]\n if c > 0:\n hasCreases = True\n vtxs = edges[e].connectedVertices()\n creaseIdxBuf += struct.pack('<I', vtxs[0].index())\n creaseIdxBuf += struct.pack('<I', vtxs[1].index())\n creaseValBuf += struct.pack('<f', c)\n buffers = [(idxBuf, 'indices'), (vtxBuf, 'vertices')]\n if not wantTris:\n buffers += [(faceBuf, 'faces')]\n if hasCreases:\n buffers += [(creaseIdxBuf, 'creaseindices'), (creaseValBuf,\n 'creasevalues')]\n buffersList = []\n for b in buffers:\n print('Writing buffer {}'.format(b[1]))\n bufFd.write(b[0])\n s = len(b[0])\n buffersList.append({'offset': offset, 'size': s, 'type': b[1]})\n offset += s\n sg = geom.connections(t='shadingEngine')[0]\n mat = sg.surfaceShader.connections()[0]\n albedo = mat.color.get()\n emittance = mat.incandescence.get()\n geomDict = {'triangles': wantTris, 'smooth': isSmooth,\n 'buffers': buffersList, 'material': {'albedo': list(albedo),\n 'emittance': list(emittance)}}\n mainFileGeoms.append(geomDict)\n mainFileDict['geometries'] = mainFileGeoms\n mainFileJson = json.dumps(mainFileDict, indent=2)\n with open(mainFilePath, 'w') as fd:\n fd.write(mainFileJson)\n print('Done')\n", "step-3": "import json\nimport struct\nimport pymel.core as pmc\nimport os.path\n\n\ndef exportVSSD(path, camName, wantTris=False, renderdata=None):\n mainFileDict = {}\n mainFilePath = path\n mainFileStem = os.path.basename(path)[:-5]\n mainFileDir = os.path.dirname(path)\n resolution = pmc.ls('defaultResolution')[0]\n renderWidth = resolution.width.get()\n renderHeight = resolution.height.get()\n if renderdata is not 
None:\n mainFileDict['render'] = {'width': renderWidth, 'height':\n renderHeight, 'spp': renderdata['spp']}\n cam = pmc.ls(camName)[0].getShape()\n mainFileDict['camera'] = {'focal': cam.getFocalLength(), 'gate': cam.\n getVerticalFilmAperture(), 'aspect': renderWidth / renderHeight,\n 'eye': list(cam.getEyePoint(space='world')), 'up': list(cam.\n upDirection(space='world')), 'look': list(cam.viewDirection(space=\n 'world'))}\n bufPath = os.path.join(mainFileDir, '{}.bin'.format(mainFileStem))\n geomList = pmc.ls(type='mesh', visible=True)\n mainFileGeoms = []\n offset = 0\n with open(bufPath, 'wb') as bufFd:\n for geom in geomList:\n print('Processing {}...'.format(geom))\n smoothLevel = pmc.displaySmoothness(geom, q=True, po=0)[0]\n isSmooth = smoothLevel > 1\n print('Smooth level {}'.format(smoothLevel))\n faceBuf = ''\n idxBuf = ''\n vtxBuf = ''\n nidxs = 0\n for face in geom.f:\n vtxidxs = face.getVertices()\n nvtxidxs = len(vtxidxs)\n if not isSmooth and wantTris:\n if nvtxidxs > 3:\n print(\n 'Non-triangulated face. Triangulate before exporting'\n )\n return\n else:\n faceBuf += struct.pack('<I', nvtxidxs)\n nidxs += nvtxidxs\n for vtxidx in vtxidxs:\n idxBuf += struct.pack('<I', vtxidx)\n for vertex in geom.vtx:\n p = vertex.getPosition('world')\n vtxBuf += struct.pack('<fff', p.x, p.y, p.z)\n hasCreases = False\n if isSmooth:\n edges = geom.edges\n creaseIdxBuf = ''\n creaseValBuf = ''\n creases = pmc.modeling.polyCrease(edges, q=True, v=0)\n for e in range(0, len(edges)):\n c = creases[e]\n if c > 0:\n hasCreases = True\n vtxs = edges[e].connectedVertices()\n creaseIdxBuf += struct.pack('<I', vtxs[0].index())\n creaseIdxBuf += struct.pack('<I', vtxs[1].index())\n creaseValBuf += struct.pack('<f', c)\n buffers = [(idxBuf, 'indices'), (vtxBuf, 'vertices')]\n if not wantTris:\n buffers += [(faceBuf, 'faces')]\n if hasCreases:\n buffers += [(creaseIdxBuf, 'creaseindices'), (creaseValBuf,\n 'creasevalues')]\n buffersList = []\n for b in buffers:\n print('Writing buffer {}'.format(b[1]))\n bufFd.write(b[0])\n s = len(b[0])\n buffersList.append({'offset': offset, 'size': s, 'type': b[1]})\n offset += s\n sg = geom.connections(t='shadingEngine')[0]\n mat = sg.surfaceShader.connections()[0]\n albedo = mat.color.get()\n emittance = mat.incandescence.get()\n geomDict = {'triangles': wantTris, 'smooth': isSmooth,\n 'buffers': buffersList, 'material': {'albedo': list(albedo),\n 'emittance': list(emittance)}}\n mainFileGeoms.append(geomDict)\n mainFileDict['geometries'] = mainFileGeoms\n mainFileJson = json.dumps(mainFileDict, indent=2)\n with open(mainFilePath, 'w') as fd:\n fd.write(mainFileJson)\n print('Done')\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# len(): length of the string
# count(): counts how many times a character appears
# lower(), upper(): lowercase / uppercase the string
# replace(): substitutes one character for another
# split(): breaks a string apart on whitespace

a = len('Karen')
print(a)  # 5
b = 'Rainha Elizabeth'.count('a')
print(b)  # 3
c = 'karen nayara'.replace('a', '@')
print(c)  # k@ren n@y@r@
d = 'karen meeseeks gomes'.split()
print(d)  # ['karen', 'meeseeks', 'gomes']
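# The header comment mentions lower() and upper() but the snippet never calls
# them; a small follow-up example (added here for completeness):
e = 'Karen Meeseeks'.lower()
print(e)  # karen meeseeks
f = 'Karen Meeseeks'.upper()
print(f)  # KAREN MEESEEKS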
normal
{ "blob_id": "3079fdbe6319454ad166d06bda5670554a5746ee", "index": 1004, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(a)\n<mask token>\nprint(b)\n<mask token>\nprint(c)\n<mask token>\nprint(d)\n", "step-3": "a = len('Karen')\nprint(a)\nb = 'Rainha Elizabeth'.count('a')\nprint(b)\nc = 'karen nayara'.replace('a', '@')\nprint(c)\nd = 'karen meeseeks gomes'.split()\nprint(d)\n", "step-4": "# len(): tamanho da string\n# count(): conta quantas vezes um caractere aparece\n# lower(), upper()\n# replace(): substitui as letras por outra\n# split(): quebra uma string a partir dos espacos em branco\n\na = len('Karen')\nprint(a)\nb = 'Rainha Elizabeth'.count('a')\nprint(b)\nc = 'karen nayara'.replace('a','@')\nprint(c)\nd = 'karen meeseeks gomes'.split()\nprint(d)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
class TimeEntry:

    def __init__(self, date, duration, togglproject='default toggl',
                 tdproject='default td', togglID='NULL', tdID='Null'):
        self.duration = duration
        self.date = date
        self.togglProject = togglproject
        self.tdProject = tdproject
        self.togglID = togglID
        self.tdID = tdID
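# A hypothetical usage sketch; the date, duration, and project names are
# illustrative values only:
entry = TimeEntry('2021-03-01', 3600, togglproject='Website redesign',
                  tdproject='Client work')
print(entry.date, entry.duration, entry.togglProject)
# 2021-03-01 3600 Website redesign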
flexible
{ "blob_id": "bdf2c35c12820dd31bd242ce1b6dae7271ceb2b7", "index": 8433, "step-1": "<mask token>\n", "step-2": "class TimeEntry:\n <mask token>\n", "step-3": "class TimeEntry:\n\n def __init__(self, date, duration, togglproject='default toggl',\n tdproject='default td', togglID='NULL', tdID='Null'):\n self.duration = duration\n self.date = date\n self.togglProject = togglproject\n self.tdProject = tdproject\n self.togglID = togglID\n self.tdID = tdID\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
__author__ = "Rick Sherman"
__credits__ = "Jeremy Schulman, Nitin Kumar"

import unittest
from nose.plugins.attrib import attr

from jnpr.junos import Device
from jnpr.junos.utils.scp import SCP

from mock import patch


@attr('unit')
class TestScp(unittest.TestCase):
    def setUp(self):
        self.dev = Device(host='1.1.1.1')

    @patch('paramiko.SSHClient')
    def test_scp_open(self, mock_connect):
        from scp import SCPClient
        self.dev.bind(scp=SCP)
        assert isinstance(self.dev.scp.open(), SCPClient)

    @patch('paramiko.SSHClient')
    def test_scp_close(self, mock_connect):
        self.dev.bind(scp=SCP)
        self.dev.scp.open()
        self.assertEqual(self.dev.scp.close(), None)

    @patch('paramiko.SSHClient')
    def test_scp_context(self, mock_connect):
        with SCP(self.dev) as scp:
            scp.get('addrbook.conf')

    @patch('jnpr.junos.device.os')
    @patch('__builtin__.open')
    @patch('paramiko.config.SSHConfig.lookup')
    @patch('paramiko.SSHClient')
    @patch('paramiko.proxy.ProxyCommand')
    def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,
                              mock_connect, mock_proxy):
        os_mock.path.exists.return_value = True
        self.dev._sshconf_path = '/home/rsherman/.ssh/config'
        with SCP(self.dev) as scp:
            scp.get('addrbook.conf')
        # NOTE: Mock silently accepts assert_called_any() as a plain attribute
        # access, so this line can never fail; assert_any_call() is the real
        # assertion method.
        mock_proxy.assert_called_any()
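# How these tests stay offline: every method patches paramiko.SSHClient, so
# Device/SCP talk to mock objects instead of a real transport, and
# test_scp_proxycommand additionally stubs os.path.exists and the SSH config
# lookup to exercise the ProxyCommand path. With the nose attrib plugin used
# above, the 'unit' attribute selects them; the runner invocation below is an
# assumption, not taken from this file:
#     nosetests -a unit test_scp.py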
flexible
{ "blob_id": "65ea40ad1c1bf6bf23aed5316b91862c9cdc353d", "index": 5564, "step-1": "<mask token>\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n <mask token>\n <mask token>\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n", "step-2": "<mask token>\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n", "step-3": "__author__ = 'Rick Sherman'\n__credits__ = 'Jeremy Schulman, Nitin Kumar'\n<mask token>\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n", "step-4": "__author__ = 'Rick Sherman'\n__credits__ = 'Jeremy Schulman, Nitin Kumar'\nimport unittest\nfrom nose.plugins.attrib import attr\nfrom jnpr.junos import Device\nfrom jnpr.junos.utils.scp import SCP\nfrom mock import patch\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n 
@patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n", "step-5": "__author__ = \"Rick Sherman\"\n__credits__ = \"Jeremy Schulman, Nitin Kumar\"\n\nimport unittest\nfrom nose.plugins.attrib import attr\n\nfrom jnpr.junos import Device\nfrom jnpr.junos.utils.scp import SCP\n\nfrom mock import patch\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko, mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
print('hello world')
print('welcome to london')
flexible
{ "blob_id": "cd322f9771f1ac90931a7229ffd5effd1cae1a54", "index": 7207, "step-1": "<mask token>\n", "step-2": "print('hello world')\nprint('welcome to london')\n", "step-3": "print(\"hello world\")\nprint(\"welcome to london\")", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from load_blender_data import pose_spherical from misc import mse, mse2psnr, to8b import os import imageio import json import torch import torch.nn as nn import numpy as np import cv2 from torch.utils.data.dataset import Dataset from torch.utils.data.dataloader import DataLoader device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') class MLP(nn.Module): def __init__(self, in_ch=2, num_layers=4, num_neurons=256): super(MLP, self).__init__() layers = [] layers.append(nn.Linear(in_ch, num_neurons)) layers.append(nn.ReLU()) for i in range(1, num_layers-1): layers.append(nn.Linear(num_neurons, num_neurons)) layers.append(nn.ReLU()) layers.append(nn.Linear(num_neurons, 3)) layers.append(nn.Sigmoid()) self.linears = nn.ModuleList(layers) def forward(self, x): for layer in self.linears: x = layer(x) return x class BlenderDataset(Dataset): def __init__(self, datadir, split='train', testskip=8): super(BlenderDataset, self).__init__() imgs = [] with open(os.path.join(datadir, split+".txt")) as f: lines = f.readlines() for i, line in enumerate(lines): name = line.strip() pose_path = os.path.join(datadir, name, 'rendering/transforms.json') with open(pose_path, 'r') as f: cam_params = json.load(f)['frames'] for cam_param in cam_params: img_name = cam_param['file_path'] imgs.append(os.path.join(datadir, name, f'rendering/{img_name}.png')) self.images = imgs print(f'{split} dataset: {len(self.images)}') def get_rays_np(self, H, W, focal, c2w): i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy') dirs = np.stack([(i - W * .5) / focal, -(j - H * .5) / focal, -np.ones_like(i)], -1) # Rotate ray directions from camera frame to the world frame rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs] # Translate camera frame's origin to the world frame. It is the origin of all rays. rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d)) return rays_o, rays_d # def __getitem__(self, idx): # img = self.images[idx] # pose = self.poses[idx] # H, W = img.shape[:2] # rays_o, rays_d = self.get_rays_np(H, W, self.focal, pose) # # ret = {'img':img.transpose((2, 0, 1)), # # 'rays_o': rays_o.transpose((2, 0, 1)), # # 'rays_d': rays_d.transpose((2, 0, 1))} # ret = {'img': img, # 'rays_o': rays_o, # 'rays_d': rays_d} # return ret def get_coords2d(self, H, W): coord = np.linspace(0, 1, H, endpoint=False) coords = np.stack(np.meshgrid(coord, coord), -1) return coords def __getitem__(self, idx): img_path = self.images[idx] img = cv2.imread(img_path, cv2.IMREAD_COLOR) img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR) / 255. 
H, W = img.shape[:2] rays_o = self.get_coords2d(H, W) ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.float32)} return ret def __len__(self): return len(self.images) class MLPRunner(object): def __init__(self, args): self.basedir = args.basedir self.expname = args.expname self.num_layers = 4 self.num_neurons = 256 self.mapping_size = 256 self.num_epoch = 1000 # on average, each image is seen by network num_epoch times self.val_epoch = 100 self.lr = 1e-4 self.batch_size = args.batch_size self.num_workers = args.num_workers self.train_set = BlenderDataset(args.datadir, split='train') self.train_loader = DataLoader(self.train_set, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) self.val_set = BlenderDataset(args.datadir, split='val') self.val_idxs = [i for i in range(len(self.val_set))] self.i_print = 1000 self.scale = 10 self.in_ch = self.mapping_size * 2 self.B_gauss = torch.randn((self.mapping_size, 2)).to(device) self.model = MLP(in_ch=self.in_ch) self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr) def embed(self, x, B): if B is None: return x else: x_proj = (2. * np.pi * x).matmul(B.transpose(1, 0)) return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1) def train(self): self.model.to(device) global_step = 0 for epoch in range(self.num_epoch): for i, data in enumerate(self.train_loader): img = data['img'].to(device) rays_o = data['rays_o'].to(device) embedding = self.embed(rays_o, self.B_gauss) embedding = embedding.reshape((-1, embedding.shape[-1])) img_pred = self.model.forward(embedding) img_pred = img_pred.reshape(img.shape) loss = mse(img_pred, img) psnr = mse2psnr(loss) self.optimizer.zero_grad() loss.backward() self.optimizer.step() if global_step % self.i_print == 0: print(f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}') # cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_gt_{epoch}_{global_step}.png'), # to8b(img[0].detach().cpu().numpy())) cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_{epoch}_{global_step}.png'), to8b(img_pred[0].detach().cpu().numpy())) global_step += 1 if epoch % self.val_epoch == 0: idx = np.random.choice(self.val_idxs, 1)[0] data = self.val_set.__getitem__(idx) img = torch.tensor(data['img']).to(device) rays_o = torch.tensor(data['rays_o']).to(device) with torch.no_grad(): embedding = self.embed(rays_o, self.B_gauss) embedding = embedding.reshape((-1, embedding.shape[-1])) img_pred = self.model.forward(embedding) img_pred = img_pred.reshape(img.shape) loss = mse(img_pred, img) psnr = mse2psnr(loss) print(f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}') # cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_gt_{epoch}_{global_step}.png'), # to8b(img.detach().cpu().numpy())) cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_{epoch}_{global_step}.png'), to8b(img_pred.detach().cpu().numpy()))
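# A standalone numpy sketch of the Gaussian Fourier-feature mapping the
# runner applies before the MLP: gamma(v) = [sin(2*pi*Bv), cos(2*pi*Bv)].
# Shapes mirror the code above (mapping_size=256, 2-D input coords); the
# random seed and the tiny 4x4 grid are assumptions for a quick check.
import numpy as np

rng = np.random.default_rng(0)
mapping_size = 256
B = rng.standard_normal((mapping_size, 2))           # same shape as B_gauss
coord = np.linspace(0, 1, 4, endpoint=False)
coords = np.stack(np.meshgrid(coord, coord), -1)     # (4, 4, 2), as in get_coords2d
proj = 2.0 * np.pi * coords @ B.T                    # (4, 4, 256)
embedding = np.concatenate([np.sin(proj), np.cos(proj)], -1)
print(embedding.shape)                               # (4, 4, 512) -> in_ch = 2 * mapping_size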
normal
{ "blob_id": "7180dc0d622fd449fcee32f2c50000d05ae2d8bb", "index": 6850, "step-1": "<mask token>\n\n\nclass BlenderDataset(Dataset):\n <mask token>\n <mask token>\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n <mask token>\n <mask token>\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n", "step-2": "<mask token>\n\n\nclass BlenderDataset(Dataset):\n\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split + '.txt')) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name,\n 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name,\n f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: 
{len(self.images)}')\n <mask token>\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR\n ) / 255.0\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.\n float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n", "step-3": "<mask token>\n\n\nclass MLP(nn.Module):\n\n def __init__(self, in_ch=2, num_layers=4, num_neurons=256):\n super(MLP, self).__init__()\n layers = []\n layers.append(nn.Linear(in_ch, num_neurons))\n layers.append(nn.ReLU())\n for i in range(1, num_layers - 1):\n layers.append(nn.Linear(num_neurons, num_neurons))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_neurons, 3))\n 
layers.append(nn.Sigmoid())\n self.linears = nn.ModuleList(layers)\n <mask token>\n\n\nclass BlenderDataset(Dataset):\n\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split + '.txt')) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name,\n 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name,\n f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n\n def get_rays_np(self, H, W, focal, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H,\n dtype=np.float32), indexing='xy')\n dirs = np.stack([(i - W * 0.5) / focal, -(j - H * 0.5) / focal, -np\n .ones_like(i)], -1)\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR\n ) / 255.0\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.\n float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n 
idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n", "step-4": "<mask token>\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device(\n 'cpu')\n\n\nclass MLP(nn.Module):\n\n def __init__(self, in_ch=2, num_layers=4, num_neurons=256):\n super(MLP, self).__init__()\n layers = []\n layers.append(nn.Linear(in_ch, num_neurons))\n layers.append(nn.ReLU())\n for i in range(1, num_layers - 1):\n layers.append(nn.Linear(num_neurons, num_neurons))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_neurons, 3))\n layers.append(nn.Sigmoid())\n self.linears = nn.ModuleList(layers)\n\n def forward(self, x):\n for layer in self.linears:\n x = layer(x)\n return x\n\n\nclass BlenderDataset(Dataset):\n\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split + '.txt')) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name,\n 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name,\n f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n\n def get_rays_np(self, H, W, focal, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H,\n dtype=np.float32), indexing='xy')\n dirs = np.stack([(i - W * 0.5) / focal, -(j - H * 0.5) / focal, -np\n .ones_like(i)], -1)\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR\n ) / 255.0\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.\n float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size 
* 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n", "step-5": "from load_blender_data import pose_spherical\nfrom misc import mse, mse2psnr, to8b\n\nimport os\nimport imageio\nimport json\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport cv2\n\n\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.dataloader import DataLoader\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\nclass MLP(nn.Module):\n def __init__(self, in_ch=2, num_layers=4, num_neurons=256):\n super(MLP, self).__init__()\n layers = []\n layers.append(nn.Linear(in_ch, num_neurons))\n layers.append(nn.ReLU())\n for i in range(1, num_layers-1):\n layers.append(nn.Linear(num_neurons, num_neurons))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_neurons, 3))\n layers.append(nn.Sigmoid())\n self.linears = nn.ModuleList(layers)\n\n def forward(self, x):\n for layer in self.linears:\n x = layer(x)\n return x\n\nclass BlenderDataset(Dataset):\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split+\".txt\")) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name, 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name, f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n\n\n def get_rays_np(self, H, W, focal, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')\n dirs = 
np.stack([(i - W * .5) / focal, -(j - H * .5) / focal, -np.ones_like(i)], -1)\n # Rotate ray directions from camera frame to the world frame\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3],\n -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs]\n # Translate camera frame's origin to the world frame. It is the origin of all rays.\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n # def __getitem__(self, idx):\n # img = self.images[idx]\n # pose = self.poses[idx]\n # H, W = img.shape[:2]\n # rays_o, rays_d = self.get_rays_np(H, W, self.focal, pose)\n # # ret = {'img':img.transpose((2, 0, 1)),\n # # 'rays_o': rays_o.transpose((2, 0, 1)),\n # # 'rays_d': rays_d.transpose((2, 0, 1))}\n # ret = {'img': img,\n # 'rays_o': rays_o,\n # 'rays_d': rays_d}\n # return ret\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR) / 255.\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000 # on average, each image is seen by network num_epoch times\n self.val_epoch = 100\n self.lr = 1e-4\n\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2. 
* np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if global_step % self.i_print == 0:\n print(f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}')\n # cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_gt_{epoch}_{global_step}.png'),\n # to8b(img[0].detach().cpu().numpy()))\n cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_{epoch}_{global_step}.png'),\n to8b(img_pred[0].detach().cpu().numpy()))\n global_step += 1\n\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}')\n # cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_gt_{epoch}_{global_step}.png'),\n # to8b(img.detach().cpu().numpy()))\n cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_{epoch}_{global_step}.png'),\n to8b(img_pred.detach().cpu().numpy()))\n", "step-ids": [ 6, 9, 12, 14, 16 ] }
[ 6, 9, 12, 14, 16 ]
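The network in the record above never sees raw pixel coordinates: its embed method projects them through a fixed random Gaussian matrix and keeps the sine and cosine of the projection (the Fourier-feature trick). A minimal self-contained sketch of that mapping, with illustrative shapes and names:

import math
import torch

def fourier_embed(coords, B):
    # coords: (N, 2) points in [0, 1); B: (mapping_size, 2) fixed Gaussian matrix
    proj = (2.0 * math.pi * coords) @ B.t()                       # (N, mapping_size)
    return torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)  # (N, 2 * mapping_size)

B = torch.randn(256, 2)  # drawn once up front, like B_gauss above
x = torch.rand(4, 2)
print(fourier_embed(x, B).shape)  # torch.Size([4, 512])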
def foo(x, y=5):

    def bar(x):
        return x + 1
    return bar(y * 2)


print(foo(3))
normal
{ "blob_id": "80d1979c5767d0ff90f464651c9d0ca6d65effb2", "index": 6472, "step-1": "<mask token>\n", "step-2": "def foo(x, y=5):\n\n def bar(x):\n return x + 1\n return bar(y * 2)\n\n\n<mask token>\n", "step-3": "def foo(x, y=5):\n\n def bar(x):\n return x + 1\n return bar(y * 2)\n\n\nprint(foo(3))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
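The snippet above is easy to trace by hand: the inner bar() shadows foo's parameter x, so the positional argument is never used. A quick check (the assert is an illustration, not part of the record):

def foo(x, y=5):
    def bar(x):
        return x + 1
    return bar(y * 2)

assert foo(3) == 11  # bar(5 * 2) evaluates to 10 + 1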
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('education', '0005_auto_20180927_1041')] operations = [migrations.RemoveField(model_name='educationgroup', name= 'students'), migrations.AddField(model_name='student', name= 'education_groups', field=models.ManyToManyField(blank=True, to= 'education.EducationGroup', verbose_name='Education Groups'))] <|reserved_special_token_1|> from django.db import migrations, models class Migration(migrations.Migration): dependencies = [('education', '0005_auto_20180927_1041')] operations = [migrations.RemoveField(model_name='educationgroup', name= 'students'), migrations.AddField(model_name='student', name= 'education_groups', field=models.ManyToManyField(blank=True, to= 'education.EducationGroup', verbose_name='Education Groups'))] <|reserved_special_token_1|> # Generated by Django 2.0.7 on 2018-09-27 13:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('education', '0005_auto_20180927_1041'), ] operations = [ migrations.RemoveField( model_name='educationgroup', name='students', ), migrations.AddField( model_name='student', name='education_groups', field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'), ), ]
flexible
{ "blob_id": "8ff7ace102b781b35fff0671e2c606bf662e2767", "index": 9851, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('education', '0005_auto_20180927_1041')]\n operations = [migrations.RemoveField(model_name='educationgroup', name=\n 'students'), migrations.AddField(model_name='student', name=\n 'education_groups', field=models.ManyToManyField(blank=True, to=\n 'education.EducationGroup', verbose_name='Education Groups'))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('education', '0005_auto_20180927_1041')]\n operations = [migrations.RemoveField(model_name='educationgroup', name=\n 'students'), migrations.AddField(model_name='student', name=\n 'education_groups', field=models.ManyToManyField(blank=True, to=\n 'education.EducationGroup', verbose_name='Education Groups'))]\n", "step-5": "# Generated by Django 2.0.7 on 2018-09-27 13:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('education', '0005_auto_20180927_1041'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='educationgroup',\n name='students',\n ),\n migrations.AddField(\n model_name='student',\n name='education_groups',\n field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
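Step-5 above moves the many-to-many relation from EducationGroup.students onto Student.education_groups. A sketch of the model state the migration produces, for an app named education; every field other than education_groups is an assumption:

from django.db import models


class EducationGroup(models.Model):
    name = models.CharField(max_length=100)  # assumed field


class Student(models.Model):
    name = models.CharField(max_length=100)  # assumed field
    education_groups = models.ManyToManyField(
        EducationGroup, blank=True, verbose_name='Education Groups')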
""" # listbinmin.py # Sam Connolly 04/03/2013 #=============================================================================== # bin data according a given column in an ascii file of column data, such that # each bin has a minimum number of points, giving the bin of each data point as # a LIST. UNEVEN BINS. #=============================================================================== """ # Import packages import numpy as np #================ PARAMETERS =================================================== # read variables header = 0 # number of header lines to ignore outdata = [1, 3] # column numbers to output bincolumn = 3 # column to bin along errorcolumn = 4 binmin = 18 # File routes route = "/disks/raid/raid1/xray/raid/sdc1g08/NetData/ngc1365/"\ + "lightcurve/refinedCounts/" # file name infilename = "NGC1365_lcurve_4_0.5-10keV.qdp" # Save output? # histogram of binning? hist = True # Save output? save = True savefile = "binsout.dat" outroute = "/disks/raid/raid1/xray/raid/sdc1g08/NetData/ngc1365/"\ #+ "spectra/" # Highlighted lightcurve? lc = True timecolumn = 2 labels = False #==================== Load data ================================================ # create file routes location = route+infilename savelocation = outroute+savefile # read data intoarray start = 0 infile= open(location, 'r') for line in infile: linedata = line.split() if start == header: columns = len(linedata) data = [[] for x in range(columns)] if start >= header: for column in range(columns): if len(linedata) == columns: data[column].append(float(linedata[column])) start += 1 infile.close() outdata = np.array(outdata) outdata -= 1 bincolumn -= 1 errorcolumn -= 1 #========================= Sort ================================================ start = True for index in range(len(data[0])): if start == True: sortindex = [index] start = False else: i = 0 if data[bincolumn][index] < data[bincolumn][sortindex[-1]]: while data[bincolumn][index] > data[bincolumn][sortindex[i]]: i += 1 sortindex.insert(i,index) else: sortindex.append(index) #======================== Bin ================================================== bins = [] for index in np.arange(0,int(len(sortindex)),binmin): this = [[],0,0,0,0,0] err = [] total = 0 for i in range(binmin): if index+i <= len(sortindex) - 1: this[0].append(sortindex[index+i]) err.append(data[errorcolumn][sortindex[index+i]]) total += data[countscolumn][sortindex[index+i] this[1] = data[bincolumn][sortindex[index]] # bin min if index+binmin-1 <= len(sortindex) - 1: # bin max this[2] = data[bincolumn][sortindex[index+binmin-1]] else: this[2] = max(data[bincolumn]) this[3] = (this[2]+this[1])/2.0 err = np.array(err) this[4] = sum(err**2) this[5] = total bins.append(this) print bins #======================== print output ========================================= if save == True: out = open(savelocation,'w') for b in range(len(bins)): low = bins[b][1] high = bins[b][2] mid = bins[b][3] errs = bins[b][4] print low, " >= x > ", high, " ==> ",mid, ' +/- ', errs , " :\n" if save == True: out.write(str(low) + " >=x> " + str(high) + " :\n") output = '' for index in bins[b][0]: for dat in outdata: if dat != bincolumn: output = output + str(data[dat][index]) + '\t' print output, "\n" if save == True: out.write(output+"\n") print "number of bins: ", len(bins) if save == True: out.write("number of bins: " + str(len(bins))) out.close() # plots nplots = 0 if hist: nplots += 1 if lc: nplots += 1 if nplots == 1: fig = plt.figure() ax = fig.add_subplot(1,1,1) if nplots == 2: fig = plt.figure() # 
histogram if hist: if nplots == 2: ax = fig.add_subplot(1,2,1) edges = [] counts = [] widths = [] for b in range(len(bins)): edges.append(bins[b][1]) counts.append(bins[b][2]) try: widths.append(data[bincolumn][bins[b+1][0][0]]-\ data[bincolumn][bins[b][0][0]]) except IndexError: widths.append(data[bincolumn][bins[b][0][-1]]-\ data[bincolumn][bins[b][0][0]]) plt.bar(edges,counts,widths) # highlighted lightcurve if lc: if nplots == 2: ax = fig.add_subplot(1,2,2) plt.scatter(data[timecolumn],data[bincolumn]) for b in range(len(bins)): try: plt.axhspan(data[bincolumn][bins[b][0][0]], \ data[bincolumn][bins[b+1][0][0]],alpha = 0.3) except IndexError: plt.axhspan(data[bincolumn][bins[b][0][0]], \ data[bincolumn][bins[b][0][-1]],alpha = 0.3) if labels: for index in range(len(data[0])): plt.annotate(data[-1][index], xy = (data[timecolumn][index],data[bincolumn][index]), xytext = (-20,20), textcoords = 'offset points', ha = 'right', va = 'bottom', bbox = dict(boxstyle = 'round,pad=0.5', fc = 'blue', alpha = 0.5), arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')) if nplots > 0: plt.show()
normal
{ "blob_id": "496c58e68d3ac78a3eb1272d61ca3603c5d843b6", "index": 4787, "step-1": "\"\"\"\n# listbinmin.py\n# Sam Connolly 04/03/2013\n\n#===============================================================================\n# bin data according a given column in an ascii file of column data, such that\n# each bin has a minimum number of points, giving the bin of each data point as \n# a LIST. UNEVEN BINS.\n#===============================================================================\n\"\"\"\n\n# Import packages\n\nimport numpy as np\n\n#================ PARAMETERS ===================================================\n\n# read variables\nheader = 0 # number of header lines to ignore\n\noutdata = [1, 3] # column numbers to output\n\nbincolumn = 3 # column to bin along\nerrorcolumn = 4\nbinmin = 18\n\n# File routes\nroute = \"/disks/raid/raid1/xray/raid/sdc1g08/NetData/ngc1365/\"\\\n\t\t\t\t\t\t+ \"lightcurve/refinedCounts/\"\n\n# file name\ninfilename \t= \"NGC1365_lcurve_4_0.5-10keV.qdp\"\n\n\n# Save output?\n\n# histogram of binning?\n\nhist = True\n\n# Save output?\n\nsave = True\nsavefile = \"binsout.dat\"\n\noutroute = \"/disks/raid/raid1/xray/raid/sdc1g08/NetData/ngc1365/\"\\\n\t\t\t\t\t\t#+ \"spectra/\"\n\n# Highlighted lightcurve?\n\nlc = True\ntimecolumn = 2\nlabels = False\n\n#==================== Load data ================================================\n\n# create file routes\nlocation = route+infilename\nsavelocation = outroute+savefile\n\n# read data intoarray\n\nstart = 0\n\ninfile= open(location, 'r')\n\nfor line in infile:\n\t\n\tlinedata = line.split()\n\n\tif start == header:\n\t\tcolumns = len(linedata)\n\t\tdata = [[] for x in range(columns)]\n\n\tif start >= header:\n\t\tfor column in range(columns):\n\t\t\tif len(linedata) == columns:\n\t\t\t\tdata[column].append(float(linedata[column]))\n\t\t\n\tstart += 1\n\ninfile.close()\n\noutdata = np.array(outdata)\t\noutdata -= 1 \nbincolumn -= 1\nerrorcolumn -= 1\n\n#========================= Sort ================================================\n\nstart = True\n\nfor index in range(len(data[0])):\n\n\tif start == True:\n\n\t\tsortindex = [index]\n\n\t\tstart = False\n\t\n\telse:\n\n\t\ti = 0\n\n\t\tif data[bincolumn][index] < data[bincolumn][sortindex[-1]]:\n\t\t\twhile data[bincolumn][index] > data[bincolumn][sortindex[i]]:\n\n\t\t\t\ti += 1\n\n\t\t\tsortindex.insert(i,index)\n\n\t\telse:\n\t\t\t\n\t\t\tsortindex.append(index)\n\n#======================== Bin ==================================================\n\nbins = []\n\nfor index in np.arange(0,int(len(sortindex)),binmin):\n\n\tthis = [[],0,0,0,0,0]\n\terr = []\n\ttotal = 0\n\n\tfor i in range(binmin):\n\n\t\tif index+i <= len(sortindex) - 1:\n\t\t\tthis[0].append(sortindex[index+i]) \n\t\t\terr.append(data[errorcolumn][sortindex[index+i]])\n\n\t\t\ttotal += data[countscolumn][sortindex[index+i]\n\n\tthis[1] = data[bincolumn][sortindex[index]] # bin min\n\n\tif index+binmin-1 <= len(sortindex) - 1:\t# bin max\n\n\t\tthis[2] = data[bincolumn][sortindex[index+binmin-1]]\n\t\t\n\telse:\n\t\tthis[2] = max(data[bincolumn])\n\n\tthis[3] = (this[2]+this[1])/2.0\n\n\terr = np.array(err)\n\n\tthis[4] = sum(err**2)\n\n\tthis[5] = total\n\n\tbins.append(this)\n\nprint bins\n\n#======================== print output =========================================\nif save == True:\n\n\tout = open(savelocation,'w')\n\nfor b in range(len(bins)):\n\n\tlow = bins[b][1]\n\thigh = bins[b][2]\n\tmid\t = bins[b][3]\n\terrs = bins[b][4]\n\n\tprint low, \" >= x > \", high, \" ==> \",mid, ' +/- ', 
errs , \" :\\n\"\n\n\tif save == True:\n\n\t\tout.write(str(low) + \" >=x> \" + str(high) + \" :\\n\")\n\n\toutput = ''\n\n\tfor index in bins[b][0]:\n\t\tfor dat in outdata:\n\t\t\tif dat != bincolumn:\n\n\t\t\t\toutput = output + str(data[dat][index]) + '\\t'\n\n\tprint output, \"\\n\"\n\n\tif save == True:\n\n\t\tout.write(output+\"\\n\")\n\n\nprint \"number of bins: \", len(bins)\n\nif save == True:\n\n\tout.write(\"number of bins: \" + str(len(bins)))\n\n\tout.close()\n\n# plots\n\nnplots = 0\n\nif hist:\n\n\tnplots += 1\n\nif lc:\n\n\tnplots += 1\n\nif nplots == 1:\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(1,1,1)\n\nif nplots == 2:\n\n\tfig = plt.figure()\n\n# histogram\n\nif hist:\n\n\tif nplots == 2:\n\t\tax = fig.add_subplot(1,2,1)\n\n\tedges = []\n\tcounts = []\n\twidths = []\n\n\tfor b in range(len(bins)):\n\n\t\tedges.append(bins[b][1])\n\t\tcounts.append(bins[b][2])\n\t\ttry:\n\t\t\twidths.append(data[bincolumn][bins[b+1][0][0]]-\\\n\t\t\t\tdata[bincolumn][bins[b][0][0]])\n\t\texcept IndexError:\n\t\t\twidths.append(data[bincolumn][bins[b][0][-1]]-\\\n\t\t\t\tdata[bincolumn][bins[b][0][0]])\n\n\tplt.bar(edges,counts,widths)\n\n\n\n\n# highlighted lightcurve\n\nif lc:\n\n\tif nplots == 2:\n\t\tax = fig.add_subplot(1,2,2)\n\n\tplt.scatter(data[timecolumn],data[bincolumn])\n\n\tfor b in range(len(bins)):\n\n\t\ttry:\n\t\t\tplt.axhspan(data[bincolumn][bins[b][0][0]], \\\n\t\t\t\t\t\t\tdata[bincolumn][bins[b+1][0][0]],alpha = 0.3)\n\t\texcept IndexError:\n\t\t\tplt.axhspan(data[bincolumn][bins[b][0][0]], \\\n\t\t\t\t\t\t\tdata[bincolumn][bins[b][0][-1]],alpha = 0.3)\n\n\tif labels:\n\t\tfor index in range(len(data[0])):\n\t\t\tplt.annotate(data[-1][index], \n\t\t\t\t\txy = (data[timecolumn][index],data[bincolumn][index]), \n\t\t\t\t\txytext = (-20,20),\n\t\t\t\t\ttextcoords = 'offset points', ha = 'right', va = 'bottom',\n\t\t\t\t\tbbox = dict(boxstyle = 'round,pad=0.5', fc = 'blue', \n\t\t\t\t\talpha = 0.5), arrowprops = dict(arrowstyle = '->', \n\t\t\t\t\tconnectionstyle = 'arc3,rad=0'))\t\n\n\nif nplots > 0:\n\n\tplt.show()\n\n\n\n\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
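The core of the script above is minimum-count binning: sort one column, then cut the sort order into consecutive groups of binmin indices, so every bin except possibly the last holds at least binmin points. A compact NumPy sketch of that idea, not the original code:

import numpy as np

def min_count_bins(values, binmin):
    order = np.argsort(values, kind='stable')
    return [order[i:i + binmin] for i in range(0, len(order), binmin)]

vals = np.array([5.0, 1.0, 3.0, 2.0, 4.0, 6.0, 0.5])
for group in min_count_bins(vals, 3):
    print(vals[group])  # consecutive runs of the sorted values; the last run may be short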
<|reserved_special_token_0|> class LRUCache: <|reserved_special_token_0|> <|reserved_special_token_0|> def put(self, key, value): if key in self.cache_map: old_node = self.cache_map.get(key) self.cache_list.remove(old_node) new_node = Node(key, value) self.cache_list.append(new_node) self.cache_map[key] = new_node else: if self.size == self.capacity: old_node = self.cache_list.remove() self.cache_map.pop(old_node.key) else: self.size += 1 new_node = Node(key, value) self.cache_list.append_front(new_node) self.cache_map[key] = new_node <|reserved_special_token_1|> <|reserved_special_token_0|> class LRUCache: def __init__(self, capacity): self.capacity = capacity self.size = 0 self.cache_map = {} self.cache_list = DoubleLinkedList(capacity=capacity) <|reserved_special_token_0|> def put(self, key, value): if key in self.cache_map: old_node = self.cache_map.get(key) self.cache_list.remove(old_node) new_node = Node(key, value) self.cache_list.append(new_node) self.cache_map[key] = new_node else: if self.size == self.capacity: old_node = self.cache_list.remove() self.cache_map.pop(old_node.key) else: self.size += 1 new_node = Node(key, value) self.cache_list.append_front(new_node) self.cache_map[key] = new_node <|reserved_special_token_1|> <|reserved_special_token_0|> class LRUCache: def __init__(self, capacity): self.capacity = capacity self.size = 0 self.cache_map = {} self.cache_list = DoubleLinkedList(capacity=capacity) def get(self, key): if key not in self.cache_map: return -1 else: node = self.cache_map.get(key) self.cache_list.remove(node) self.cache_list.append_front(node) return node.value def put(self, key, value): if key in self.cache_map: old_node = self.cache_map.get(key) self.cache_list.remove(old_node) new_node = Node(key, value) self.cache_list.append(new_node) self.cache_map[key] = new_node else: if self.size == self.capacity: old_node = self.cache_list.remove() self.cache_map.pop(old_node.key) else: self.size += 1 new_node = Node(key, value) self.cache_list.append_front(new_node) self.cache_map[key] = new_node <|reserved_special_token_1|> from cache_replacement.double_linked_list import DoubleLinkedList from cache_replacement.node import Node class LRUCache: def __init__(self, capacity): self.capacity = capacity self.size = 0 self.cache_map = {} self.cache_list = DoubleLinkedList(capacity=capacity) def get(self, key): if key not in self.cache_map: return -1 else: node = self.cache_map.get(key) self.cache_list.remove(node) self.cache_list.append_front(node) return node.value def put(self, key, value): if key in self.cache_map: old_node = self.cache_map.get(key) self.cache_list.remove(old_node) new_node = Node(key, value) self.cache_list.append(new_node) self.cache_map[key] = new_node else: if self.size == self.capacity: old_node = self.cache_list.remove() self.cache_map.pop(old_node.key) else: self.size += 1 new_node = Node(key, value) self.cache_list.append_front(new_node) self.cache_map[key] = new_node
flexible
{ "blob_id": "898ff6e38e80419d61ec4bbde827e8ca729eb19a", "index": 5202, "step-1": "<mask token>\n\n\nclass LRUCache:\n <mask token>\n <mask token>\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n", "step-2": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.size = 0\n self.cache_map = {}\n self.cache_list = DoubleLinkedList(capacity=capacity)\n <mask token>\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n", "step-3": "<mask token>\n\n\nclass LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.size = 0\n self.cache_map = {}\n self.cache_list = DoubleLinkedList(capacity=capacity)\n\n def get(self, key):\n if key not in self.cache_map:\n return -1\n else:\n node = self.cache_map.get(key)\n self.cache_list.remove(node)\n self.cache_list.append_front(node)\n return node.value\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n", "step-4": "from cache_replacement.double_linked_list import DoubleLinkedList\nfrom cache_replacement.node import Node\n\n\nclass LRUCache:\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.size = 0\n self.cache_map = {}\n self.cache_list = DoubleLinkedList(capacity=capacity)\n\n def get(self, key):\n if key not in self.cache_map:\n return -1\n else:\n node = self.cache_map.get(key)\n self.cache_list.remove(node)\n self.cache_list.append_front(node)\n return node.value\n\n def put(self, key, value):\n if key in self.cache_map:\n old_node = self.cache_map.get(key)\n self.cache_list.remove(old_node)\n new_node = Node(key, value)\n self.cache_list.append(new_node)\n self.cache_map[key] = new_node\n else:\n if self.size == self.capacity:\n old_node = self.cache_list.remove()\n self.cache_map.pop(old_node.key)\n else:\n self.size += 1\n new_node = Node(key, value)\n self.cache_list.append_front(new_node)\n self.cache_map[key] = new_node\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
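For reference, the get/put semantics of the final step above can be reproduced with collections.OrderedDict instead of the custom DoubleLinkedList; this is a comparison sketch, not the record's implementation:

from collections import OrderedDict

class TinyLRU:
    def __init__(self, capacity):
        self.capacity = capacity
        self.data = OrderedDict()

    def get(self, key):
        if key not in self.data:
            return -1
        self.data.move_to_end(key)  # mark as most recently used
        return self.data[key]

    def put(self, key, value):
        if key in self.data:
            self.data.move_to_end(key)
        elif len(self.data) == self.capacity:
            self.data.popitem(last=False)  # evict the least recently used entry
        self.data[key] = value

cache = TinyLRU(2)
cache.put(1, 'a'); cache.put(2, 'b'); cache.get(1); cache.put(3, 'c')
print(cache.get(2))  # -1: key 2 was evicted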
import math
n, m, a = map(int, input().split())
top = math.ceil(n / a)
bottom = math.ceil(m / a)
print(top * bottom)
normal
{ "blob_id": "6c426d2b165e01a7cec9f7ddbd96113ae05668f6", "index": 4898, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(top * bottom)\n", "step-3": "<mask token>\nn, m, a = map(int, input().split())\ntop = math.ceil(n / a)\nbottom = math.ceil(m / a)\nprint(top * bottom)\n", "step-4": "import math\nn, m, a = map(int, input().split())\ntop = math.ceil(n / a)\nbottom = math.ceil(m / a)\nprint(top * bottom)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
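The snippet above is the classic flagstone computation, ceil(n/a) * ceil(m/a). A worked example with fixed values instead of stdin (the inputs are illustrative), including the common integer-only form:

import math

n, m, a = 6, 6, 4
print(math.ceil(n / a) * math.ceil(m / a))      # 2 * 2 = 4
print(((n + a - 1) // a) * ((m + a - 1) // a))  # 4, avoids floating point entirely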
#!/usr/bin/env python3 import sys all_neighbors_coord = [] for i in range(-1, 2): for j in range(-1, 2): for k in range(-1, 2): if i != 0 or j != 0 or k != 0: all_neighbors_coord.append((i, j, k)) def add_coord(c1, c2): return (c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]) class life: def __init__(self, world): self.world = world def get_world_size(self): xs = [c[0] for c in self.world] ys = [c[1] for c in self.world] zs = [c[2] for c in self.world] return ((min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))) def is_active(self, coord): return coord in self.world def count_active_neighbors(self, coord): return len(list(filter(lambda c: self.is_active(add_coord(coord, c)), all_neighbors_coord))) def get_next_square_state(self, coord, next_world): if self.is_active(coord): if self.count_active_neighbors(coord) in [2, 3]: next_world[coord] = '#' else: if self.count_active_neighbors(coord) == 3: next_world[coord] = '#' def step(self): next_world = {} ws = self.get_world_size() for i in range(ws[0][0]-1,ws[1][0]+2): for j in range(ws[0][1]-1,ws[1][1]+2): for k in range(ws[0][2]-1,ws[1][2]+2): self.get_next_square_state((i,j,k), next_world) self.world = next_world def run(self, steps): for _i in range(0, steps): self.step() self.print() def count_active(self): return len(self.world) def print(self): ws = self.get_world_size() for k in range(ws[0][2], ws[1][2]+1): print('z={}'.format(k)) print() for j in range(ws[0][1], ws[1][1]+1): s = '' for i in range(ws[0][0], ws[1][0]+1): if self.is_active((i,j,k)): s += '#' else: s += '.' print(s) print() def parse_world(rows): world = {} k = 0 for j, r in enumerate(rows): for i, c in enumerate(r): if c == '#': world[(i,j,k)] = '#' return world inp = 'test.txt' if len(sys.argv) == 2: inp = sys.argv[1] world = parse_world([r.strip() for r in open(inp, 'r').readlines()]) l = life(world) l.print() l.run(6) print(l.count_active())
normal
{ "blob_id": "e7060658ae1838b0870b2a3adb61c9f8d78c93c7", "index": 3245, "step-1": "<mask token>\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[i, j, k] = '#'\n return world\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef add_coord(c1, c2):\n return c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), 
max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[i, j, k] = '#'\n return world\n\n\n<mask token>\n", "step-4": "<mask token>\nall_neighbors_coord = []\nfor i in range(-1, 2):\n for j in range(-1, 2):\n for k in range(-1, 2):\n if i != 0 or j != 0 or k != 0:\n all_neighbors_coord.append((i, j, k))\n\n\ndef add_coord(c1, c2):\n return c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[i, j, k] = '#'\n return world\n\n\ninp = 'test.txt'\nif len(sys.argv) == 2:\n inp = sys.argv[1]\nworld = parse_world([r.strip() for r in open(inp, 'r').readlines()])\nl = life(world)\nl.print()\nl.run(6)\nprint(l.count_active())\n", "step-5": "#!/usr/bin/env python3\n\nimport sys\n\nall_neighbors_coord = []\nfor i in range(-1, 2):\n for j in range(-1, 2):\n 
for k in range(-1, 2):\n if i != 0 or j != 0 or k != 0:\n all_neighbors_coord.append((i, j, k))\n\ndef add_coord(c1, c2):\n return (c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2])\n\nclass life:\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return ((min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs)))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n else:\n if self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0]-1,ws[1][0]+2):\n for j in range(ws[0][1]-1,ws[1][1]+2):\n for k in range(ws[0][2]-1,ws[1][2]+2):\n self.get_next_square_state((i,j,k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2]+1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1]+1):\n s = ''\n for i in range(ws[0][0], ws[1][0]+1):\n if self.is_active((i,j,k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[(i,j,k)] = '#'\n return world\n\ninp = 'test.txt'\nif len(sys.argv) == 2:\n inp = sys.argv[1]\n\nworld = parse_world([r.strip() for r in open(inp, 'r').readlines()])\n\nl = life(world)\nl.print()\nl.run(6)\nprint(l.count_active())\n", "step-ids": [ 10, 11, 12, 14, 16 ] }
[ 10, 11, 12, 14, 16 ]
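A usage sketch for the cellular automaton above, assuming its classes and parse_world are in scope; with these rules the widely used three-row sample grid settles at 112 active cells after six steps:

world = parse_world(['.#.', '..#', '###'])
game = life(world)
game.run(6)
print(game.count_active())  # expected: 112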
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def validate_locale(locale: t.Union[Locale, str]) ->Locale: if isinstance(locale, str): try: return Locale(locale) except ValueError: raise LocaleError(locale) if not isinstance(locale, Locale): raise LocaleError(locale) return locale <|reserved_special_token_1|> <|reserved_special_token_0|> __all__ = ['Locale', 'validate_locale'] def validate_locale(locale: t.Union[Locale, str]) ->Locale: if isinstance(locale, str): try: return Locale(locale) except ValueError: raise LocaleError(locale) if not isinstance(locale, Locale): raise LocaleError(locale) return locale <|reserved_special_token_1|> <|reserved_special_token_0|> import typing as t from mimesis.enums import Locale from mimesis.exceptions import LocaleError __all__ = ['Locale', 'validate_locale'] def validate_locale(locale: t.Union[Locale, str]) ->Locale: if isinstance(locale, str): try: return Locale(locale) except ValueError: raise LocaleError(locale) if not isinstance(locale, Locale): raise LocaleError(locale) return locale <|reserved_special_token_1|> """This module provides constants for locale-dependent providers.""" import typing as t from mimesis.enums import Locale from mimesis.exceptions import LocaleError __all__ = ["Locale", "validate_locale"] def validate_locale(locale: t.Union[Locale, str]) -> Locale: if isinstance(locale, str): try: return Locale(locale) except ValueError: raise LocaleError(locale) if not isinstance(locale, Locale): raise LocaleError(locale) return locale
flexible
{ "blob_id": "779445aa22145d5076940ea5b214c25ad233dd0e", "index": 3087, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n", "step-3": "<mask token>\n__all__ = ['Locale', 'validate_locale']\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n", "step-4": "<mask token>\nimport typing as t\nfrom mimesis.enums import Locale\nfrom mimesis.exceptions import LocaleError\n__all__ = ['Locale', 'validate_locale']\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n", "step-5": "\"\"\"This module provides constants for locale-dependent providers.\"\"\"\n\nimport typing as t\n\nfrom mimesis.enums import Locale\nfrom mimesis.exceptions import LocaleError\n\n__all__ = [\"Locale\", \"validate_locale\"]\n\n\ndef validate_locale(locale: t.Union[Locale, str]) -> Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n\n return locale\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
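A usage sketch for the validator above, assuming mimesis is installed and validate_locale is importable; the members shown come from mimesis' Locale enum:

from mimesis.enums import Locale

print(validate_locale('en') is Locale.EN)  # True: plain strings are coerced to the enum
print(validate_locale(Locale.DE))          # enum members pass through unchanged
# validate_locale('xx')                    # an unknown code raises LocaleError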
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Upload

class DocumentForm(forms.ModelForm):
    class Meta:
        model = Upload
        fields = ('document',)
normal
{ "blob_id": "e7b1ccbcbb81ff02561d858a4db54d49a2aa0f8a", "index": 6094, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass DocumentForm(forms.ModelForm):\n\n\n class Meta:\n model = Upload\n fields = 'document',\n", "step-3": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import Upload\n\n\nclass DocumentForm(forms.ModelForm):\n\n\n class Meta:\n model = Upload\n fields = 'document',\n", "step-4": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import Upload\n\nclass DocumentForm(forms.ModelForm):\n class Meta:\n model = Upload\n fields = ('document',)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
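A typical view-side counterpart for a file-upload ModelForm like the one above; this is a standard Django pattern, and the view, template, and URL names here are made up:

from django.shortcuts import redirect, render

def upload_view(request):
    if request.method == 'POST':
        # request.FILES must be passed alongside request.POST for file fields
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('upload_view')  # assumes a matching URL name
    else:
        form = DocumentForm()
    return render(request, 'upload.html', {'form': form})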
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for episode in range(num_episodes): state = env.reset() done = False rewards_current_episode = 0 for step in range(steps_per_episodes): exploration_rate_threshold = random.uniform(0, 1) if exploration_rate_threshold > exploration_rate: action = np.argmax(q_table[state, :]) else: action = env.action_space.sample() next_state, reward, done, info = env.step(action) q_table[state, action] = q_table[state, action] * (1 - learning_rate ) + learning_rate * (reward + discount_rate * np.max(q_table[ next_state, :])) state = next_state rewards_current_episode += reward if done == True: break exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate * episode) rewards_all_episodes.append(rewards_current_episode) <|reserved_special_token_0|> print('********Average reward per thousand episodes********\n') for r in rewards_per_thousand_episodes: print(count, ': ', str(sum(r / 1000))) count += 1000 print(""" ********Q-table******** """) print(q_table) <|reserved_special_token_1|> <|reserved_special_token_0|> env = gym.make('FrozenLake8x8-v0', is_slippery=False) observation = env.reset() action_space_size = env.action_space.n state_space_size = env.observation_space.n q_table = np.zeros((state_space_size, action_space_size)) num_episodes = 10000 steps_per_episodes = 100 learning_rate = 0.1 discount_rate = 0.99 exploration_rate = 1 max_exploration_rate = 1 min_exploration_rate = 0.01 exploration_decay_rate = 0.001 rewards_all_episodes = [] for episode in range(num_episodes): state = env.reset() done = False rewards_current_episode = 0 for step in range(steps_per_episodes): exploration_rate_threshold = random.uniform(0, 1) if exploration_rate_threshold > exploration_rate: action = np.argmax(q_table[state, :]) else: action = env.action_space.sample() next_state, reward, done, info = env.step(action) q_table[state, action] = q_table[state, action] * (1 - learning_rate ) + learning_rate * (reward + discount_rate * np.max(q_table[ next_state, :])) state = next_state rewards_current_episode += reward if done == True: break exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate * episode) rewards_all_episodes.append(rewards_current_episode) rewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), num_episodes / 1000) count = 1000 print('********Average reward per thousand episodes********\n') for r in rewards_per_thousand_episodes: print(count, ': ', str(sum(r / 1000))) count += 1000 print(""" ********Q-table******** """) print(q_table) <|reserved_special_token_1|> import gym import numpy as np import random import time from IPython.display import clear_output env = gym.make('FrozenLake8x8-v0', is_slippery=False) observation = env.reset() action_space_size = env.action_space.n state_space_size = env.observation_space.n q_table = np.zeros((state_space_size, action_space_size)) num_episodes = 10000 steps_per_episodes = 100 learning_rate = 0.1 discount_rate = 0.99 exploration_rate = 1 max_exploration_rate = 1 min_exploration_rate = 0.01 exploration_decay_rate = 0.001 rewards_all_episodes = [] for episode in range(num_episodes): state = env.reset() done = False rewards_current_episode = 0 for step in range(steps_per_episodes): exploration_rate_threshold = random.uniform(0, 1) if exploration_rate_threshold > exploration_rate: action = np.argmax(q_table[state, :]) else: action = 
env.action_space.sample() next_state, reward, done, info = env.step(action) q_table[state, action] = q_table[state, action] * (1 - learning_rate ) + learning_rate * (reward + discount_rate * np.max(q_table[ next_state, :])) state = next_state rewards_current_episode += reward if done == True: break exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate * episode) rewards_all_episodes.append(rewards_current_episode) rewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), num_episodes / 1000) count = 1000 print('********Average reward per thousand episodes********\n') for r in rewards_per_thousand_episodes: print(count, ': ', str(sum(r / 1000))) count += 1000 print(""" ********Q-table******** """) print(q_table) <|reserved_special_token_1|> # link https://deeplizard.com/learn/video/QK_PP_2KgGE import gym import numpy as np import random import time from IPython.display import clear_output # setup the env env = gym.make("FrozenLake8x8-v0", is_slippery=False) observation = env.reset() # setup the q-table action_space_size = env.action_space.n state_space_size = env.observation_space.n q_table = np.zeros((state_space_size, action_space_size)) #print(q_table) # instaniate hyper-parameters num_episodes = 10000 steps_per_episodes = 100 learning_rate = 0.1 discount_rate = 0.99 exploration_rate = 1 max_exploration_rate = 1 min_exploration_rate = 0.01 exploration_decay_rate = 0.001 # empty list to hold our rewards over time rewards_all_episodes = [] # main loops for episode in range(num_episodes): state = env.reset() done = False rewards_current_episode = 0 for step in range(steps_per_episodes): # exploration vs exploitation exploration_rate_threshold = random.uniform(0,1) if exploration_rate_threshold > exploration_rate: action = np.argmax(q_table[state,:]) else: action = env.action_space.sample() next_state, reward, done, info = env.step(action) #print(next_state) #print(q_table.shape) # update q-table q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :])) state = next_state rewards_current_episode += reward if done == True: break # Exploration rate decay exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode) rewards_all_episodes.append(rewards_current_episode) # Calculate and print the average reward per thousand episodes rewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes),num_episodes/1000) count = 1000 print("********Average reward per thousand episodes********\n") for r in rewards_per_thousand_episodes: print(count, ": ", str(sum(r/1000))) count += 1000 # Print updated Q-table print("\n\n********Q-table********\n") print(q_table)
flexible
{ "blob_id": "b791afec1c9fb214d1f3b4ec0ec67f905d96aabf", "index": 3249, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\n<mask token>\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n", "step-3": "<mask token>\nenv = gym.make('FrozenLake8x8-v0', is_slippery=False)\nobservation = env.reset()\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\nrewards_all_episodes = []\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), \n num_episodes / 1000)\ncount = 1000\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n", "step-4": "import gym\nimport numpy as np\nimport random\nimport time\nfrom IPython.display import clear_output\nenv = gym.make('FrozenLake8x8-v0', is_slippery=False)\nobservation = env.reset()\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\nrewards_all_episodes = []\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n 
exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), \n num_episodes / 1000)\ncount = 1000\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n", "step-5": "# link https://deeplizard.com/learn/video/QK_PP_2KgGE\nimport gym\nimport numpy as np\nimport random\nimport time\nfrom IPython.display import clear_output\n\n# setup the env\nenv = gym.make(\"FrozenLake8x8-v0\", is_slippery=False)\nobservation = env.reset()\n\n# setup the q-table\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\n#print(q_table)\n\n# instaniate hyper-parameters\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\n\n# empty list to hold our rewards over time\nrewards_all_episodes = []\n \n # main loops\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n \n for step in range(steps_per_episodes):\n \n # exploration vs exploitation\n exploration_rate_threshold = random.uniform(0,1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state,:])\n else:\n action = env.action_space.sample()\n \n next_state, reward, done, info = env.step(action)\n #print(next_state)\n #print(q_table.shape)\n\n # update q-table\n q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :]))\n\n state = next_state\n rewards_current_episode += reward\n \n if done == True:\n break\n \n # Exploration rate decay\n exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)\n rewards_all_episodes.append(rewards_current_episode)\n\n# Calculate and print the average reward per thousand episodes\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes),num_episodes/1000)\ncount = 1000\n\nprint(\"********Average reward per thousand episodes********\\n\")\nfor r in rewards_per_thousand_episodes:\n print(count, \": \", str(sum(r/1000)))\n count += 1000\n\n# Print updated Q-table\nprint(\"\\n\\n********Q-table********\\n\")\nprint(q_table)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
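Both unmasked variants in this record implement the standard tabular Q-learning update, which in equation form is

Q(s, a) \leftarrow (1 - \alpha)\, Q(s, a) + \alpha \bigl[ r + \gamma \max_{a'} Q(s', a') \bigr]

with alpha the learning_rate, gamma the discount_rate, and an epsilon-greedy policy whose exploration rate decays per episode as epsilon = epsilon_min + (epsilon_max - epsilon_min) * exp(-k * episode).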
import urlparse


def parse_url(url):
    """ 
    Parse a url into a ParseResult() object then evolve that ParseResult()
    instance into an EasyUrl() object, finally return the EasyUrl() instance.
    """
    url = urlparse.urlparse(url)
    #print url.__class__
    return EasyUrl.EvolveParseResult(url)


class EasyUrl(urlparse.ParseResult):
    """ 
    Don't change the url at all, instead create a new EasyUrl() object.
    Use the python builtin methods to make the ParseResult() object friendlier. 
    """

    def __init__(self, url):
        self = parse_url(url)  # returns an EasyUrl object
        # NB: rebinding 'self' only changes the local name; it does not alter the instance.
        self.initialize_attributes()

    # EasyUrl Methods
    def initialize_attributes(self):
        """ 
        When creating an EasyUrl() instance through the
        EvolveParseResult() method, the __init__() method is never
        called, therefore it makes sense to place our initialize code
        into a separate method that we can call from both __init__() and
        EvolveParseResult().
        """
        self.host = self.netloc
        self.url = self.geturl()

        self.set_scheme_if_non('https')

        # The file extensions we are watching for. Either load the extensions
        # from a text file, or create a separate python file containing a list
        # of supported file extensions
        self.listed_file_extensions = [
            '.jpg', '.bmp', '.png',
            '.mp3', '.mp4', '.flv', '.avi',
            '.zip', '.7z', '.tar', '.tar.gz', '.tar.bz', '.rar',
            '.exe', '.git', '.torrent',
            ]
        # Type Boolean: True or False
        # Urls contain some useful information. Depending on the framework the
        # website is built on, a url can contain information about paths and files.
        # This is a glimpse of the site's computer system. Pretty Useful!
        self.is_file_extension = None  # Does this path end as a file?
        #self.file_extension = self.check_for_file_extension()

    def set_scheme_if_non(self, scheme='http'):
        print self.scheme
        if not self.scheme:
            # Note: ParseResult fields are read-only named-tuple slots, so this
            # assignment raises AttributeError at runtime.
            self.scheme = scheme
            self._set_url()

    def _set_url(self):
        """ Updates our self.url by setting it to self.geturl()."""
        self.url = self.geturl()

    # Required Methods for Third parties
    # - requests
    # - the url passed when making a request must be a string (or have the find method)
    def find(self, *args, **kwargs):
        return self.url.find(*args, **kwargs)

    # Builtin Methods: Overriding the python builtin methods
    def __str__(self):
        return self.url

    def __repr__(self):
        return self.url
        # return '<EasyUrl: %s>' % self.url

    def __unicode__(self):
        return self.url

    # Static Methods: Call from the class definition, not using an instance.
    # example:
    # Good: EasyUrl.EvolveParseResult(...)
    #
    # Bad : url = EasyUrl()
    #     : url = url.EvolveParseResult(...)
    @staticmethod
    def EvolveParseResult(parseresult):
        """ 
        Take a formally (through urlparse.urlparse) constructed
        ParseResult() object and transform it into this EasyUrl() object.
        """
        parseresult.__class__ = EasyUrl  # This turns the class into EasyUrl()

        easy_url = parseresult
        easy_url.initialize_attributes()
        return easy_url


class HistoryEntry(object):
    """ Keeps a collapsed form of a scraper state."""

    def __init__(self, url, response):
        self.url = url
        self.response = response

    def load_to_scraper(self, scraper):
        """ 
        Delegate the parameters from this HistoryEntry()
        to a scraper that is passed in as an argument.
        """
        scraper.url = self.url
        scraper.response = 
'stackoverflow.com' the dictionary. The key being the site name, the value is a list containing all HistoryEntry's for that site. """ # Simplified version: for entry in entries: try: self[entry.url.host] += [entry] except KeyError: self[entry.url.host] = [entry] temp_dict = {entry.url.host: [] for entry in entries} for entry in entries: temp_dict[entry.url.host] += [entry] # Update the dictionary # self.update(temp_dict) # Will override any lists with the same host name for host, entry in temp_dict.items(): #try: self[host] += [entry] #except IndexError: #self[host] = [entry] def save(self, scraper): """ Save the current state of a scraper. """ entry = HistoryEntry(scraper.url, scraper.response) self.load_history_entries(entry) #url = 'http://stackoverflow.com/' #easy_url1 = parse_url(url) #print easy_url1 #print easy_url1.__class__ #print repr(easy_url1) #print easy_url1.geturl()
from django.contrib.auth.models import User
from rt.models import Movie_Suggestion, MovieDB, ActorDB, TVDB


def user_present(username):
    if User.objects.filter(username=username).count():
        return True
    return False


#Takes in a list of MovieDB/TVDB objects
#Outputs a list of sorted titles
def sort_title(movies):
    titles = []
    for i in movies:
        titles.append(str(i.title))
    titles.sort()
    return titles


#Takes a list of MovieDB objects and their titles as Strings
#Outputs a list of tuples containing the (title, id)
def sort_id(movies, titles):
    ids = []
    for i in titles:
        try:
            movie_id = MovieDB.objects.get(title=i).id
            ids.append((i, movie_id))
        except MovieDB.DoesNotExist:
            return []
    return ids


def sort_tv_id(tvs, titles):
    ids = []
    for i in titles:
        try:
            tv_id = TVDB.objects.get(title=i).id
            ids.append((i, tv_id))
        except TVDB.DoesNotExist:
            return []
    return ids


def sort_name(actors):
    names = []
    for i in actors:
        names.append(str(i.name))
    names.sort()
    return names


def sort_actor_id(actors, names):
    ids = []
    for i in names:
        try:
            actor_id = ActorDB.objects.get(name=i).id
            ids.append((i, actor_id))
        except ActorDB.DoesNotExist:
            return []
    return ids
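#A suggested single-query alternative to sort_id, sketched here for
#illustration only. It assumes the same MovieDB model inside a configured
#Django project and is not part of the original module.
def sort_id_bulk(titles):
    #One ORM query instead of one per title; mirrors sort_id's behavior
    #of returning [] when any title is missing.
    id_by_title = dict(
        MovieDB.objects.filter(title__in=titles).values_list('title', 'id')
    )
    if len(id_by_title) != len(set(titles)):
        return []
    return [(title, id_by_title[title]) for title in titles]
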
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-07-10 02:52
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('civictechprojects', '0036_auto_20200708_2251'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectrelationship',
            name='introduction_text',
            field=models.CharField(blank=True, max_length=10000),
        ),
    ]
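# To preview the SQL the AddField above generates before applying it,
# Django's sqlmigrate command can be used. The migration file name below is
# an assumption (only the 0036 dependency is visible here); substitute the
# real name from the app's migrations/ directory.
#
#   python manage.py sqlmigrate civictechprojects 0037_projectrelationship_introduction_text
#   python manage.py migrate civictechprojects
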
from django.shortcuts import render
from django.views.generic import DetailView
from .models import Course


# Create your views here.

def courses_list_view(request):
    products = Course.objects.all()
    title = "دوره ها"  # Persian for "Courses"
    context = {
        "object_list": products,
        "title": title,
    }

    return render(request, "courses/courses_list.html", context)


class CoursesDetailView(DetailView):
    queryset = Course.objects.all()
    template_name = "courses/course.html"

    def get_context_data(self, *args, object_list=None, **kwargs):
        context = super(CoursesDetailView, self).get_context_data(*args, **kwargs)
        print(context)  # debug print left in the original
        return context
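# Neither view above is routed in this file; a typical urls.py for the app
# might look like the following. The module path and pattern names are
# assumptions for illustration (Django 2.0+ path() syntax).
#
# courses/urls.py (hypothetical)
from django.urls import path
from .views import courses_list_view, CoursesDetailView

urlpatterns = [
    path('', courses_list_view, name='courses-list'),
    path('<int:pk>/', CoursesDetailView.as_view(), name='course-detail'),
]
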
from torch.utils.data import IterableDataset, DataLoader
from torch import nn
from torch.nn import functional as F
from triplet_training_generator import get_train_test_apikeys, training_generator
from pathlib import Path
from transformers import AutoModel
import torch
from tqdm import tqdm
import pandas as pd

MEMMAP_DIRECTORY = Path("/media/data/tokenized_crawl")
BATCHES_PER_EPOCH = 8192


class DataGenerator(IterableDataset):
    def __init__(self, memmap_directory, apikey_weighted_df):
        super(DataGenerator, self).__init__()
        self.data_generator = training_generator(memmap_directory, apikey_weighted_df)

    def __iter__(self):
        return self.data_generator


class CrossEncoderModel(torch.nn.Module):
    def __init__(self):
        super(CrossEncoderModel, self).__init__()
        # We need to make sure this matches the model we tokenized for!
        self.bert = AutoModel.from_pretrained('distilbert-base-cased')
        self.hidden = nn.Linear(768, 512)
        self.out = nn.Linear(512, 1)
        # self.out = torch.nn.Linear(768, 768, bias=False)

    def forward(self, tensor_in, sep_token_id=102):
        # tensor_in: [batch, 3, 256] token ids -- (anchor, positive, negative) triplets.
        positive_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 1]], dim=1)
        positive_pairs[:, 256] = sep_token_id
        negative_pairs = torch.cat([tensor_in[:, 0], tensor_in[:, 2]], dim=1)
        negative_pairs[:, 256] = sep_token_id
        positive_labels = torch.ones(len(positive_pairs), dtype=torch.float32, device=tensor_in.device)
        negative_labels = torch.zeros_like(positive_labels)
        labels = torch.cat([positive_labels, negative_labels])
        inputs = torch.cat([positive_pairs, negative_pairs], dim=0)
        assert len(labels) == inputs.shape[0]
        out = self.bert(inputs)[0]
        # out = out[:, 0, :]  # CLS token
        out = out.mean(dim=1, keepdim=False)  # Mean pooling (kwarg is keepdim, not keepdims)
        out = F.gelu(self.hidden(out))
        out = torch.squeeze(self.out(out))
        loss = F.binary_cross_entropy_with_logits(out, labels)
        return loss


def main():
    batch_size = 16
    batches_per_epoch = (2 ** 19) // batch_size
    eval_batches_per_epoch = (2 ** 18) // batch_size
    save_path = Path('model.save')

    train_weighted_apikeys, test_weighted_apikeys = get_train_test_apikeys(MEMMAP_DIRECTORY)
    debug_weighted_apikeys = pd.concat([train_weighted_apikeys, test_weighted_apikeys]).query('num_posts > 1000000')
    train_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, pin_memory=True, num_workers=1)
    test_dataset = DataGenerator(MEMMAP_DIRECTORY, debug_weighted_apikeys)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, pin_memory=True, num_workers=1)

    model = CrossEncoderModel().cuda()
    # Diverges or just outputs the same vector for all samples at higher LRs
    model_params = model.parameters()
    optimizer = torch.optim.Adam(model_params, lr=1e-4)
    if save_path.is_file():
        print("Loading state...")
        checkpoint = torch.load(str(save_path))
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch'] + 1
    else:
        start_epoch = 0
    for epoch in range(start_epoch, 60):
        with tqdm(total=batches_per_epoch, dynamic_ncols=True) as bar:
            bar.set_description(f"Epoch {epoch}")
            bar_loss = 0.
            model.train()
            for i, batch in enumerate(train_loader):
                batch = batch.cuda()
                optimizer.zero_grad()  # reset gradients every batch (was once per epoch)
                loss = model(batch)
                loss.backward()
                optimizer.step()
                bar.update(1)
                bar_loss = ((bar_loss * i) + float(loss.detach())) / (i + 1)  # Rolling mean loss
                bar.set_postfix_str(f"Loss: {bar_loss:.3f}")
                if i == batches_per_epoch - 1:
                    break
        with tqdm(total=eval_batches_per_epoch, dynamic_ncols=True) as bar:
            bar.set_description(f"Eval epoch {epoch}")
            bar_loss = 0.
            model.eval()
            with torch.no_grad():
                for i, batch in enumerate(test_loader):
                    batch = batch.cuda()
                    loss = model(batch)
                    bar.update(1)
                    bar_loss = ((bar_loss * i) + float(loss.detach())) / (i + 1)  # Rolling mean loss
                    bar.set_postfix_str(f"Loss: {bar_loss:.3f}")
                    if i == eval_batches_per_epoch - 1:
                        break
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()
        }, str(save_path))


if __name__ == '__main__':
    main()
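# forward() above returns only the training loss, so a trained checkpoint
# cannot score document pairs directly. The helper below is an inference
# sketch, not part of the original script: score_pair is a hypothetical
# name, and the tokenizer settings are assumed to mirror the 256-token
# halves (joined at position 256 by [SEP], id 102) that forward() builds.
# It relies on the torch / F imports at the top of this script.
from transformers import AutoTokenizer


def score_pair(model, text_a, text_b, device='cuda'):
    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-cased')

    def encode(text):
        return tokenizer(text, max_length=256, padding='max_length',
                         truncation=True, return_tensors='pt')['input_ids']

    pair = torch.cat([encode(text_a), encode(text_b)], dim=1).to(device)
    pair[:, 256] = tokenizer.sep_token_id  # 102 for this vocabulary
    model.eval()
    with torch.no_grad():
        pooled = model.bert(pair)[0].mean(dim=1, keepdim=False)
        logit = torch.squeeze(model.out(F.gelu(model.hidden(pooled))))
    return torch.sigmoid(logit).item()  # probability that the pair matches
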
import copy
import sys
import os
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas
import tsprocClass as tc
import pestUtil as pu

#update parameter values and fixed/unfixed

#--since Joe is so pro-America...
tc.DATE_FMT = '%m/%d/%Y'

#--build a list of template and model-equivalent files
tpl_dir = 'tpl\\'
modin_dir = 'par\\'
tpl_files, modin_files = [], []
files = os.listdir(modin_dir)
for file in files:
    modin_files.append(modin_dir + file)
    tpl_files.append(tpl_dir + file.split('.')[0] + '.tpl')
modin_files.append('UMD.03\\SWRREF\\SWR_Dataset11.ref')
tpl_files.append('tpl\\SWR_Dataset11.tpl')

#--start and end
model_start = datetime(1996, 1, 1, hour=12)
obs_start = datetime(1997, 1, 1, hour=12)
#obs_end = datetime(2010,12,31,hour=12)
obs_end = datetime(year=1999, month=12, day=12, hour=12)
obs_start_str = obs_start.strftime(tc.DATE_FMT)
obs_end_str = obs_end.strftime(tc.DATE_FMT)

date_dir = 'date_files\\'

#--instance
tsproc_infile = 'tsproc_setup.dat'
tsp = tc.tsproc(tsproc_infile, out_file='processed.dat', out_fmt='long')
pest_oblocks, pest_mblocks = [], []

#--stage sites
stg_obs_file = 'UMD.03\\obsref\\stage\\All_DBHYDRO_stage.smp'
stg_reach_file = 'setup_files\\UMD.03_StageStats.csv'
f = open(stg_reach_file, 'r')
reach_dict = {}
header = f.readline()
for line in f:
    raw = line.strip().split(',')
    name = raw[0].upper().replace(' ', '_').replace('-', '')
    if name.endswith('W'):
        name = name[:-1]
    reach_dict[name] = int(raw[1])
f.close()

#parser = lambda x: datetime.strptime(x,tc.DATE_FMT+' %H:%M:%S')
#stage_df = pandas.read_table(stg_obs_file,header=None,parse_dates=[[1,2]],date_parser=parser,sep='\s*')
#stage_df.columns = ['datetime','site','value']
stage_smp = pu.smp(stg_obs_file, date_fmt=tc.DATE_FMT, pandas=True, load=True)
stage_sites = stage_smp.records.keys()
for site in stage_sites:
    if site not in reach_dict.keys():
        print 'site not found in reach dict', site

obs_names = []
mod_names = []
reach_numbers = []
smp_site_names = []
for i, site in enumerate(reach_dict.keys()):
    if site not in stage_sites:
        print 'site not found in smp file', site
        reach_dict.pop(site)
    else:
        obs_names.append('ost_{0:03.0f}or'.format(i + 1))
        mod_names.append('mst_{0:03.0f}or'.format(i + 1))
        reach_numbers.append(reach_dict[site])
        smp_site_names.append(site)

mblocks = tsp.get_mul_series_swr(reach_numbers, None, 'UMD.03\\Results\\UMD.stg', model_start, mod_names, swr_file_type='stage')
oblocks = tsp.get_mul_series_ssf(reach_dict.keys(), stg_obs_file, context=tc.PEST_CONTEXT, series_list=obs_names)
assert len(mblocks) == len(oblocks)

#--process each stage record individually because of the variable record length
for i, [site, oblock, mblock] in enumerate(zip(smp_site_names, oblocks, mblocks)):
    oblock = [oblock]
    mblock = [mblock]
    #--get the start and end of the observed record
    ostart = stage_smp.records[site].dropna().index[0]
    oend = stage_smp.records[site].dropna().index[-1]
    dstart, dend = max(obs_start, ostart), min(obs_end, oend)
    print site, dstart, dend
    if dend > dstart:
        full_file = date_dir + site + '_stg.dat'
        tc.write_date_file(full_file, dstart, dend, None)
        uniform_days = tsp.new_series_uniform([oblock[0].name], dstart + timedelta(days=7), dend)
        biweekly_days = tsp.new_series_uniform([oblock[0].name], dstart + timedelta(days=14), dend, interval=14, suffix='ub')

        #--model simulated block
        reduced_block = tsp.reduce_time(mblock, dstart, end_dt=dend)
        relative_block = tsp.drawdown(reduced_block, full_file, first=True)
        interp_block = tsp.new_time_base(relative_block, uniform_days)
        filter_block = tsp.baseflow_filter(interp_block)
        diff_block = tsp.difference_2_series(interp_block, filter_block)
        bi_block = tsp.new_time_base(diff_block, biweekly_days, suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_r'],role='final',wght=100.0)
        #pest_mblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block, [site], role='final', wght=100.0)
        pest_mblocks.extend(renamed_block)

        #--observation block
        reduced_block = tsp.reduce_time(oblock, dstart, end_dt=dend, context=tc.PEST_CONTEXT)
        relative_block = tsp.drawdown(reduced_block, full_file, first=True, context=tc.PEST_CONTEXT)
        interp_block = tsp.new_time_base(relative_block, uniform_days, context=tc.PEST_CONTEXT)
        filter_block = tsp.baseflow_filter(interp_block, context=tc.PEST_CONTEXT)
        diff_block = tsp.difference_2_series(interp_block, filter_block, context=tc.PEST_CONTEXT)
        bi_block = tsp.new_time_base(diff_block, biweekly_days, context=tc.PEST_CONTEXT, suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_ro'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
        #pest_oblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block, [site + '_o'], role='final', wght=100.0, context=tc.PEST_CONTEXT)
        pest_oblocks.extend(renamed_block)

#--baseflow obs
bf_obs_file = 'UMD.03\\Results\\UMDNetFlow_observed_Monthly.smp'
bf_mod_file = 'UMD.03\\Results\\UMDNetFlow_simulated_Monthly.smp'
bf_obs_smp = pu.smp(bf_obs_file, load=True, date_fmt=tc.DATE_FMT, pandas=True)
bf_mod_smp = pu.smp(bf_mod_file, load=True, date_fmt=tc.DATE_FMT, pandas=True)
bf_obs_sites = bf_obs_smp.records.keys()
bf_mod_sites = bf_mod_smp.records.keys()
assert len(bf_obs_sites) == len(bf_mod_sites)
bf_mod_sites = []
for osite in bf_obs_sites:
    print osite
    msite = osite[:-1] + 's'
    assert msite in bf_mod_smp.records.keys()
    bf_mod_sites.append(msite)
    print bf_obs_smp.records[osite].shape, bf_mod_smp.records[osite[:-1] + 's'].shape

obs_names = []
mod_names = []
for i, s in enumerate(bf_obs_sites):
    obs_names.append('obf_{0:03.0f}or'.format(i + 1))
    mod_names.append('mbf_{0:03.0f}or'.format(i + 1))

bf_oblocks = tsp.get_series_ssf(bf_obs_sites, bf_obs_file, block_operation='load_bf_obs', series_list=obs_names, context=tc.PEST_CONTEXT)
bf_mblocks = tsp.get_series_ssf(bf_mod_sites, bf_mod_file, block_operation='load_bf_mod', series_list=mod_names)

time_str = '00:00:00'
for mblock, oblock, site in zip(bf_mblocks, bf_oblocks, bf_obs_sites):
    #--baseflow accumulation
    date_file_name = date_dir + site + '_bf.dat'
    obs_df = bf_obs_smp.records[site].dropna()
    obs_df = obs_df[obs_start:]
    ostart, oend = obs_df.index[0], obs_df.index[-1]
    print site, ostart, oend
    f = open(date_file_name, 'w', 0)
    f.write(ostart.strftime(tc.DATE_FMT) + ' ' + time_str + ' ' + oend.strftime(tc.DATE_FMT) + ' ' + time_str + '\n')
    f.close()
    vcalc_mblock = tsp.volume_calc([mblock], date_file_name)
    vcalc_oblock = tsp.volume_calc([oblock], date_file_name, context=tc.PEST_CONTEXT)
    vser_mblock = tsp.vol_2_series(vcalc_mblock)
    vser_oblock = tsp.vol_2_series(vcalc_oblock, context=tc.PEST_CONTEXT)
    renamed_mblock = tsp.copy_2_series(vser_mblock, [site[:-1] + 'p'], role='final', wght=0.0)
    renamed_oblock = tsp.copy_2_series(vser_oblock, [site[:-2] + 'op'], role='final', wght=0.0, context=tc.PEST_CONTEXT)
    pest_mblocks.extend(renamed_mblock)
    pest_oblocks.extend(renamed_oblock)

    #--the raw baseflow series
    renamed_mblock = tsp.copy_2_series([mblock], [site[:-1] + 's'], role='final', wght=100.0)
    renamed_oblock = tsp.copy_2_series([oblock], [site[:-1] + 'o'], role='final', wght=100.0, context=tc.PEST_CONTEXT)
    pest_mblocks.extend(renamed_mblock)
    pest_oblocks.extend(renamed_oblock)

hobs_file = 'UMD.03\\obsref\\head\\heads.smp'
hobs_smp = pu.smp(hobs_file, date_fmt=tc.DATE_FMT, load=True)
hobs_start, hobs_end = hobs_smp.get_daterange(site_name='all', startmin=obs_start, endmax=obs_end)

mobs_file = 'UMD.03\\modref\\head\\mheads.smp'
mobs_smp = pu.smp(mobs_file, date_fmt=tc.DATE_FMT, load=True)

site_names = hobs_smp.records.keys()

#--generate base names for processing
obs_names = []
mod_names = []
for i, s in enumerate(site_names):
    obs_names.append('ogw_{0:03.0f}or'.format(i + 1))
    mod_names.append('mgw_{0:03.0f}or'.format(i + 1))

#--write the load series block
oblocks = tsp.get_mul_series_ssf(site_names, hobs_file, block_operation='load_heads', context=tc.PEST_CONTEXT, series_list=obs_names)
mblocks = tsp.get_mul_series_ssf(site_names, mobs_file, block_operation='load_heads', series_list=mod_names)

#--process each head record individually because of the variable record length
for i, [site_name, oblock, mblock] in enumerate(zip(site_names, oblocks, mblocks)):
    oblock = [oblock]
    mblock = [mblock]
    #--get the starting and end date of each record within the reduced model sim time
    rstart, rend = hobs_start[site_name], hobs_end[site_name]
    if rend > obs_start:
        #--find the date range for this record and write date files
        dstart, dend = max(obs_start, rstart), min(obs_end, rend)
        print site_name, dstart, dend
        week_file = date_dir + site_name + '_wk.dat'
        full_file = date_dir + site_name + '.dat'
        dry_file = date_dir + site_name + '_dry.dat'
        #tc.write_date_file(week_file,dstart+timedelta(days=7),dend-timedelta(days=7),timedelta(days=7))
        tc.write_date_file(full_file, dstart, dend, None)
        uniform_days = tsp.new_series_uniform([oblock[0].name], dstart + timedelta(days=7), dend)
        biweekly_days = tsp.new_series_uniform([oblock[0].name], dstart + timedelta(days=14), dend, interval=14, suffix='ub')
        #weekly_block = tsp.series_avg(relative_block,week_file,context=tc.PEST_CONTEXT)

        #--observation block
        reduced_block = tsp.reduce_time(oblock, dstart, end_dt=dend, context=tc.PEST_CONTEXT)
        relative_block = tsp.drawdown(reduced_block, full_file, first=True, context=tc.PEST_CONTEXT)
        interp_block = tsp.new_time_base(relative_block, uniform_days, context=tc.PEST_CONTEXT)
        filter_block = tsp.baseflow_filter(interp_block, context=tc.PEST_CONTEXT)
        diff_block = tsp.difference_2_series(interp_block, filter_block, context=tc.PEST_CONTEXT)
        bi_block = tsp.new_time_base(diff_block, biweekly_days, context=tc.PEST_CONTEXT, suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_ro'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
        #pest_oblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block, [site_name + '_o'], role='final', wght=100.0, context=tc.PEST_CONTEXT)
        pest_oblocks.extend(renamed_block)

        #--model simulated block
        reduced_block = tsp.reduce_time(mblock, dstart, end_dt=dend)
        relative_block = tsp.drawdown(reduced_block, full_file, first=True)
        interp_block = tsp.new_time_base(relative_block, uniform_days)
        filter_block = tsp.baseflow_filter(interp_block)
        diff_block = tsp.difference_2_series(interp_block, filter_block)
        bi_block = tsp.new_time_base(diff_block, biweekly_days, suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_r'],role='final',wght=100.0)
        #pest_mblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block, [site_name], role='final', wght=100.0)
        pest_mblocks.extend(renamed_block)
    else:
        print 'no data for record in reduced sim time:', site_name
    #if i > 100:
    #    break

#--write the model run tsproc file
tsp.set_context('model_run')
tsp.tsproc_file = 'tsproc_model_run.dat'
tsp.write_tsproc()

#--write the setup tsproc file
tsp.write_pest(tpl_files, modin_files, pest_oblocks, pest_mblocks, svd=True, parms='pst_components\\params.dat', parm_grp='pst_components\\param_groups.dat')
tsp.set_context(tc.PEST_CONTEXT)
tsp.tsproc_file = 'tsproc_setup.dat'
tsp.write_tsproc()

os.system('tsproc.exe <tsproc_setup.in >tsproc_screen.out')
os.system('addreg1.exe pest.pst umd03.pst')
normal
{ "blob_id": "c060cdb7730ba5c4d2240b65331f5010cac222fa", "index": 8721, "step-1": "import copy\nimport sys\nimport os\nfrom datetime import datetime,timedelta\nfrom dateutil.relativedelta import relativedelta\nimport numpy as np\nimport pandas\n\nimport tsprocClass as tc \nimport pestUtil as pu \n\n#update parameter values and fixed/unfixed\n\n#--since Joe is so pro-America...\ntc.DATE_FMT = '%m/%d/%Y'\n\n#--build a list of template and model-equivalent files\ntpl_dir = 'tpl\\\\'\nmodin_dir = 'par\\\\'\ntpl_files,modin_files = [],[]\nfiles = os.listdir(modin_dir)\nfor file in files:\n modin_files.append(modin_dir+file)\n tpl_files.append(tpl_dir+file.split('.')[0]+'.tpl')\nmodin_files.append('UMD.03\\\\SWRREF\\\\SWR_Dataset11.ref')\ntpl_files.append('tpl\\\\SWR_Dataset11.tpl')\n\n#--start and end\nmodel_start = datetime(1996,1,1,hour=12)\nobs_start = datetime(1997,1,1,hour=12)\n#obs_end = datetime(2010,12,31,hour=12)\nobs_end = datetime(year=1999,month=12,day=12,hour=12)\nobs_start_str = obs_start.strftime(tc.DATE_FMT)\nobs_end_str = obs_end.strftime(tc.DATE_FMT)\n\ndate_dir = 'date_files\\\\'\n\n#--instance\ntsproc_infile = 'tsproc_setup.dat'\ntsp = tc.tsproc(tsproc_infile,out_file='processed.dat',out_fmt='long')\n\npest_oblocks,pest_mblocks = [],[]\n\n#--stage sites\nstg_obs_file = 'UMD.03\\\\obsref\\\\stage\\\\All_DBHYDRO_stage.smp'\nstg_reach_file = 'setup_files\\\\UMD.03_StageStats.csv'\nf = open(stg_reach_file,'r')\nreach_dict = {}\nheader = f.readline()\nfor line in f:\n raw = line.strip().split(',')\n name = raw[0].upper().replace(' ','_').replace('-','')\n if name.endswith('W'):\n name = name[:-1]\n reach_dict[name] = int(raw[1])\nf.close()\n\n#parser = lambda x: datetime.strptime(x,tc.DATE_FMT+' %H:%M:%S')\n#stage_df = pandas.read_table(stg_obs_file,header=None,parse_dates=[[1,2]],date_parser=parser,sep='\\s*')\n#stage_df.columns = ['datetime','site','value']\n\nstage_smp = pu.smp(stg_obs_file,date_fmt=tc.DATE_FMT,pandas=True,load=True)\nstage_sites = stage_smp.records.keys()\nfor site in stage_sites:\n if site not in reach_dict.keys():\n print 'site not found in reach dict',site\n\nobs_names = []\nmod_names = []\nreach_numbers = []\nsmp_site_names = []\nfor i,site in enumerate(reach_dict.keys()):\n if site not in stage_sites:\n print 'site not found in smp file',site\n reach_dict.pop(site)\n else: \n obs_names.append('ost_{0:03.0f}or'.format(i+1)) \n mod_names.append('mst_{0:03.0f}or'.format(i+1))\n reach_numbers.append(reach_dict[site])\n smp_site_names.append(site)\nmblocks = tsp.get_mul_series_swr(reach_numbers,None,'UMD.03\\\\Results\\\\UMD.stg',model_start,mod_names,swr_file_type='stage')\noblocks = tsp.get_mul_series_ssf(reach_dict.keys(),stg_obs_file,context=tc.PEST_CONTEXT,series_list=obs_names)\n\nassert len(mblocks) == len(oblocks)\n\n\n#--process each head record individually because of the variable record length\nfor i,[site,oblock,mblock] in enumerate(zip(smp_site_names,oblocks,mblocks)): \n oblock = [oblock]\n mblock = [mblock]\n #--get the start and end of the observed record\n ostart = stage_smp.records[site].dropna().index[0]\n oend = stage_smp.records[site].dropna().index[-1] \n dstart,dend = max(obs_start,ostart),min(obs_end,oend)\n print site,dstart,dend\n if dend > dstart:\n full_file = date_dir+site+'_stg.dat'\n tc.write_date_file(full_file,dstart,dend,None)\n\n uniform_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=7),dend)\n biweekly_days = 
tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=14),dend,interval=14,suffix='ub')\n \n\n #--model simulated block \n reduced_block = tsp.reduce_time(mblock,dstart,end_dt=dend) \n relative_block = tsp.drawdown(reduced_block,full_file,first=True) \n interp_block = tsp.new_time_base(relative_block,uniform_days) \n filter_block = tsp.baseflow_filter(interp_block)\n diff_block = tsp.difference_2_series(interp_block,filter_block)\n bi_block = tsp.new_time_base(diff_block,biweekly_days,suffix='bi')\n\n #--copy the final processed block to have the same name as the original\n #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_r'],role='final',wght=100.0)\n #pest_mblocks.extend(renamed_block)\n renamed_block = tsp.copy_2_series(bi_block,[site],role='final',wght=100.0)\n pest_mblocks.extend(renamed_block) \n\n\n reduced_block = tsp.reduce_time(oblock,dstart,end_dt=dend,context=tc.PEST_CONTEXT) \n relative_block = tsp.drawdown(reduced_block,full_file,first=True,context=tc.PEST_CONTEXT) \n interp_block = tsp.new_time_base(relative_block,uniform_days,context=tc.PEST_CONTEXT) \n filter_block = tsp.baseflow_filter(interp_block,context=tc.PEST_CONTEXT)\n diff_block = tsp.difference_2_series(interp_block,filter_block,context=tc.PEST_CONTEXT)\n bi_block = tsp.new_time_base(diff_block,biweekly_days,context=tc.PEST_CONTEXT,suffix='bi') \n \n #--copy the final processed block to have the same name as the original\n #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_ro'],role='final',wght=100.0,context=tc.PEST_CONTEXT)\n #pest_oblocks.extend(renamed_block)\n renamed_block = tsp.copy_2_series(bi_block,[site+'_o'],role='final',wght=100.0,context=tc.PEST_CONTEXT)\n pest_oblocks.extend(renamed_block) \n \n \n\n#--baseflow obs\nbf_obs_file = 'UMD.03\\\\Results\\\\UMDNetFlow_observed_Monthly.smp'\nbf_mod_file = 'UMD.03\\\\Results\\\\UMDNetFlow_simulated_Monthly.smp'\nbf_obs_smp = pu.smp(bf_obs_file,load=True,date_fmt=tc.DATE_FMT,pandas=True)\nbf_mod_smp = pu.smp(bf_mod_file,load=True,date_fmt=tc.DATE_FMT,pandas=True)\n\nbf_obs_sites = bf_obs_smp.records.keys()\nbf_mod_sites = bf_mod_smp.records.keys()\nassert len(bf_obs_sites) == len(bf_mod_sites)\nbf_mod_sites = []\nfor osite in bf_obs_sites:\n print osite\n msite = osite[:-1]+'s'\n assert msite in bf_mod_smp.records.keys()\n bf_mod_sites.append(msite)\n print bf_obs_smp.records[osite].shape,bf_mod_smp.records[osite[:-1]+'s'].shape\n \n\nobs_names = []\nmod_names = []\nfor i,s in enumerate(bf_obs_sites):\n obs_names.append('obf_{0:03.0f}or'.format(i+1)) \n mod_names.append('mbf_{0:03.0f}or'.format(i+1))\n\nbf_oblocks = tsp.get_series_ssf(bf_obs_sites,bf_obs_file,block_operation='load_bf_obs',series_list=obs_names,context=tc.PEST_CONTEXT) \nbf_mblocks = tsp.get_series_ssf(bf_mod_sites,bf_mod_file,block_operation='load_bf_mod',series_list=mod_names)\n \n\ntime_str = '00:00:00'\nfor mblock,oblock,site in zip(bf_mblocks,bf_oblocks,bf_obs_sites): \n #--baseflow accumulation\n date_file_name = date_dir+site+'_bf.dat'\n obs_df = bf_obs_smp.records[site].dropna()\n obs_df = obs_df[obs_start:]\n ostart,oend = obs_df.index[0],obs_df.index[-1] \n print site,ostart,oend\n f = open(date_file_name,'w',0)\n f.write(ostart.strftime(tc.DATE_FMT)+' '+time_str+' '+oend.strftime(tc.DATE_FMT)+' '+time_str+'\\n')\n f.close() \n vcalc_mblock = tsp.volume_calc([mblock],date_file_name)\n vcalc_oblock = tsp.volume_calc([oblock],date_file_name,context=tc.PEST_CONTEXT)\n vser_mblock = tsp.vol_2_series(vcalc_mblock)\n vser_oblock = 
tsp.vol_2_series(vcalc_oblock,context=tc.PEST_CONTEXT)\n renamed_mblock = tsp.copy_2_series(vser_mblock,[site[:-1]+'p'],role='final',wght=0.0)\n renamed_oblock = tsp.copy_2_series(vser_oblock,[site[:-2]+'op'],role='final',wght=0.0,context=tc.PEST_CONTEXT) \n pest_mblocks.extend(renamed_mblock) \n pest_oblocks.extend(renamed_oblock) \n\n #--the raw baseflow series\n renamed_mblock = tsp.copy_2_series([mblock],[site[:-1]+'s'],role='final',wght=100.0)\n renamed_oblock = tsp.copy_2_series([oblock],[site[:-1]+'o'],role='final',wght=100.0,context=tc.PEST_CONTEXT)\n pest_mblocks.extend(renamed_mblock) \n pest_oblocks.extend(renamed_oblock) \n \n\nhobs_file = 'UMD.03\\\\obsref\\\\head\\\\heads.smp'\nhobs_smp = pu.smp(hobs_file,date_fmt = tc.DATE_FMT,load=True)\nhobs_start,hobs_end = hobs_smp.get_daterange(site_name='all',startmin=obs_start,endmax=obs_end)\n\nmobs_file = 'UMD.03\\\\modref\\\\head\\\\mheads.smp'\nmobs_smp = pu.smp(mobs_file,date_fmt = tc.DATE_FMT,load=True)\n\nsite_names = hobs_smp.records.keys()\n\n#--generate base names for processing\nobs_names = []\nmod_names = []\nfor i,s in enumerate(site_names):\n obs_names.append('ogw_{0:03.0f}or'.format(i+1))\n mod_names.append('mgw_{0:03.0f}or'.format(i+1))\n \n#--write the load series block\noblocks = tsp.get_mul_series_ssf(site_names,hobs_file,block_operation='load_heads',context=tc.PEST_CONTEXT,series_list=obs_names) \nmblocks = tsp.get_mul_series_ssf(site_names,mobs_file,block_operation='load_heads',series_list=mod_names) \n\n#--process each head record individually because of the variable record length\nfor i,[site_name,oblock,mblock] in enumerate(zip(site_names,oblocks,mblocks)): \n oblock = [oblock]\n mblock = [mblock]\n \n #--get the starting and end date of each record within the reduced model sim time\n rstart,rend = hobs_start[site_name],hobs_end[site_name] \n if rend > obs_start: \n #--find the date range for this record and write date files\n dstart,dend = max(obs_start,rstart),min(obs_end,rend)\n print site_name,dstart,dend\n week_file = date_dir+site_name+'_wk.dat'\n full_file = date_dir+site_name+'.dat'\n dry_file = date_dir+site_name+'_dry.dat'\n #tc.write_date_file(week_file,dstart+timedelta(days=7),dend-timedelta(days=7),timedelta(days=7))\n tc.write_date_file(full_file,dstart,dend,None) \n \n \n uniform_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=7),dend)\n biweekly_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=14),dend,interval=14,suffix='ub')\n #weekly_block = tsp.series_avg(relative_block,week_file,context=tc.PEST_CONTEXT) \n\n #--observation block \n reduced_block = tsp.reduce_time(oblock,dstart,end_dt=dend,context=tc.PEST_CONTEXT) \n relative_block = tsp.drawdown(reduced_block,full_file,first=True,context=tc.PEST_CONTEXT) \n interp_block = tsp.new_time_base(relative_block,uniform_days,context=tc.PEST_CONTEXT) \n filter_block = tsp.baseflow_filter(interp_block,context=tc.PEST_CONTEXT)\n diff_block = tsp.difference_2_series(interp_block,filter_block,context=tc.PEST_CONTEXT)\n bi_block = tsp.new_time_base(diff_block,biweekly_days,context=tc.PEST_CONTEXT,suffix='bi') \n \n \n #--copy the final processed block to have the same name as the original\n #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_ro'],role='final',wght=100.0,context=tc.PEST_CONTEXT)\n #pest_oblocks.extend(renamed_block)\n renamed_block = tsp.copy_2_series(bi_block,[site_name+'_o'],role='final',wght=100.0,context=tc.PEST_CONTEXT)\n pest_oblocks.extend(renamed_block) \n \n\n #--model simulated 
block \n reduced_block = tsp.reduce_time(mblock,dstart,end_dt=dend) \n relative_block = tsp.drawdown(reduced_block,full_file,first=True) \n interp_block = tsp.new_time_base(relative_block,uniform_days) \n filter_block = tsp.baseflow_filter(interp_block)\n diff_block = tsp.difference_2_series(interp_block,filter_block)\n bi_block = tsp.new_time_base(diff_block,biweekly_days,suffix='bi')\n\n #--copy the final processed block to have the same name as the original\n #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_r'],role='final',wght=100.0)\n #pest_mblocks.extend(renamed_block)\n renamed_block = tsp.copy_2_series(bi_block,[site_name],role='final',wght=100.0)\n pest_mblocks.extend(renamed_block) \n\n else:\n print 'no data for record in reduced sim time:',site_name \n #if i > 100:\n #break\n\n#--write the model run tspoc file \ntsp.set_context('model_run')\ntsp.tsproc_file = 'tsproc_model_run.dat'\ntsp.write_tsproc()\n\n#--write the setup tsproc file\ntsp.write_pest(tpl_files,modin_files,pest_oblocks,pest_mblocks,svd=True,parms='pst_components\\\\params.dat',parm_grp='pst_components\\\\param_groups.dat')\ntsp.set_context(tc.PEST_CONTEXT)\ntsp.tsproc_file = 'tsproc_setup.dat'\n\ntsp.write_tsproc()\nos.system('tsproc.exe <tsproc_setup.in >tsproc_screen.out')\nos.system('addreg1.exe pest.pst umd03.pst')\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
''' Compress images '''

from PIL import Image


def resizeImage(image_file):
	try:
		# get the image's width and height in pixels
		img = Image.open(image_file)
		width, height = img.size

		# get the largest dimension
		max_dim = max(img.size)

		if max_dim > 1000:
			# resize the image using the largest side as dimension
			factor = 1000./max_dim
			new_width = int(width*factor)
			new_height = int(height*factor)
			resized_image = img.resize((new_width, new_height), Image.ANTIALIAS)
			print width, height, new_width, new_height

			# save the resized image to a file
			# overwrite existing file
			resized_image_file = image_file
			resized_image.save(resized_image_file)
			print("%s resized" % resized_image_file)
	except:
		print 'Cannot open ' + image_file


# pick an image file you have in the working directory
# (or give full path name)
for i in range(2713):
	image_file = "static/images/download/%d.jpg" %(i+1)
	resizeImage(image_file)
normal
{ "blob_id": "1b43125c2ebffd0a268a4a0ffdbbf407de7b0374", "index": 7486, "step-1": "''' Compress images '''\n\nfrom PIL import Image\n\n\ndef resizeImage(image_file):\n\ttry:\n\t\t# get the image's width and height in pixels\n\t\timg = Image.open(image_file)\n\t\twidth, height = img.size\n\n\t\t# get the largest dimension\n\t\tmax_dim = max(img.size)\n\n\t\tif max_dim > 1000:\n\t\t\t# resize the image using the largest side as dimension\n\t\t\tfactor = 1000./max_dim\n\t\t\tnew_width = int(width*factor)\n\t\t\tnew_height = int(height*factor)\n\t\t\tresized_image = img.resize((new_width, new_height), Image.ANTIALIAS)\n\t\t\tprint width, height, new_width, new_height\n\n\t\t\t# save the resized image to a file\n\t\t\t# overwrite existing file\n\t\t\tresized_image_file = image_file\n\t\t\tresized_image.save(resized_image_file)\n\t\t\t#\n\t\t\tprint(\"%s resized\" % resized_image_file)\n\texcept:\n\t\tprint 'Cannot open ' + image_file\n\t\n\n# pick an image file you have in the working directory\n# (or give full path name)\nfor i in range(2713):\n\timage_file = \"static/images/download/%d.jpg\" %(i+1)\n\tresizeImage(image_file)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> class TelaLisatrClientes: <|reserved_special_token_0|> def init_components(self, lista_clientes): layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values= lista_clientes, size=(60, 10))], [sg.Submit()]] self.__window = sg.Window('Lista de clientes').Layout(layout) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class TelaLisatrClientes: def __init__(self): self.__window = None def init_components(self, lista_clientes): layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values= lista_clientes, size=(60, 10))], [sg.Submit()]] self.__window = sg.Window('Lista de clientes').Layout(layout) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class TelaLisatrClientes: def __init__(self): self.__window = None def init_components(self, lista_clientes): layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values= lista_clientes, size=(60, 10))], [sg.Submit()]] self.__window = sg.Window('Lista de clientes').Layout(layout) def lista_clientes(self, lista_clientes): self.init_components(lista_clientes) button, values = self.__window.Read() self.__window.Close() return button, values <|reserved_special_token_1|> import PySimpleGUI as sg class TelaLisatrClientes: def __init__(self): self.__window = None def init_components(self, lista_clientes): layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values= lista_clientes, size=(60, 10))], [sg.Submit()]] self.__window = sg.Window('Lista de clientes').Layout(layout) def lista_clientes(self, lista_clientes): self.init_components(lista_clientes) button, values = self.__window.Read() self.__window.Close() return button, values <|reserved_special_token_1|> import PySimpleGUI as sg class TelaLisatrClientes(): def __init__(self): self.__window = None def init_components(self, lista_clientes): layout = [ [sg.Text('Dados do cliente')], [sg.Listbox(values=lista_clientes, size=(60, 10))], [sg.Submit()] ] self.__window = sg.Window('Lista de clientes').Layout(layout) def lista_clientes(self, lista_clientes): self.init_components(lista_clientes) button, values = self.__window.Read() self.__window.Close() return button, values
flexible
{ "blob_id": "624b34d160ea6db4f5249544f1614a20f506ca9e", "index": 895, "step-1": "<mask token>\n\n\nclass TelaLisatrClientes:\n <mask token>\n\n def init_components(self, lista_clientes):\n layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=\n lista_clientes, size=(60, 10))], [sg.Submit()]]\n self.__window = sg.Window('Lista de clientes').Layout(layout)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TelaLisatrClientes:\n\n def __init__(self):\n self.__window = None\n\n def init_components(self, lista_clientes):\n layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=\n lista_clientes, size=(60, 10))], [sg.Submit()]]\n self.__window = sg.Window('Lista de clientes').Layout(layout)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass TelaLisatrClientes:\n\n def __init__(self):\n self.__window = None\n\n def init_components(self, lista_clientes):\n layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=\n lista_clientes, size=(60, 10))], [sg.Submit()]]\n self.__window = sg.Window('Lista de clientes').Layout(layout)\n\n def lista_clientes(self, lista_clientes):\n self.init_components(lista_clientes)\n button, values = self.__window.Read()\n self.__window.Close()\n return button, values\n", "step-4": "import PySimpleGUI as sg\n\n\nclass TelaLisatrClientes:\n\n def __init__(self):\n self.__window = None\n\n def init_components(self, lista_clientes):\n layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=\n lista_clientes, size=(60, 10))], [sg.Submit()]]\n self.__window = sg.Window('Lista de clientes').Layout(layout)\n\n def lista_clientes(self, lista_clientes):\n self.init_components(lista_clientes)\n button, values = self.__window.Read()\n self.__window.Close()\n return button, values\n", "step-5": "import PySimpleGUI as sg\n\nclass TelaLisatrClientes():\n\n def __init__(self):\n self.__window = None\n\n def init_components(self, lista_clientes):\n\n layout = [\n [sg.Text('Dados do cliente')],\n [sg.Listbox(values=lista_clientes, size=(60, 10))],\n [sg.Submit()]\n ]\n\n self.__window = sg.Window('Lista de clientes').Layout(layout)\n\n def lista_clientes(self, lista_clientes):\n\n self.init_components(lista_clientes)\n\n button, values = self.__window.Read()\n\n self.__window.Close()\n\n return button, values\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
def read_int(): return int(input().strip()) def read_ints(): return list(map(int, input().strip().split(' '))) def solve(): K, S = read_ints() # X+Y+Z = S # 0 <= X,Y,Z <= K total = 0 for X in range(K+1): if S-X < 0: break # Y+Z=S-X Y_min = max(S-X-K, 0) Y_max = min(S-X, K) if Y_min <= Y_max: total += Y_max-Y_min+1 return total if __name__ == '__main__': print(solve())
normal
{ "blob_id": "46b1fc975fbeedcafaa66c85c378e2249a495647", "index": 8827, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef read_ints():\n return list(map(int, input().strip().split(' ')))\n\n\ndef solve():\n K, S = read_ints()\n total = 0\n for X in range(K + 1):\n if S - X < 0:\n break\n Y_min = max(S - X - K, 0)\n Y_max = min(S - X, K)\n if Y_min <= Y_max:\n total += Y_max - Y_min + 1\n return total\n\n\n<mask token>\n", "step-3": "def read_int():\n return int(input().strip())\n\n\ndef read_ints():\n return list(map(int, input().strip().split(' ')))\n\n\ndef solve():\n K, S = read_ints()\n total = 0\n for X in range(K + 1):\n if S - X < 0:\n break\n Y_min = max(S - X - K, 0)\n Y_max = min(S - X, K)\n if Y_min <= Y_max:\n total += Y_max - Y_min + 1\n return total\n\n\n<mask token>\n", "step-4": "def read_int():\n return int(input().strip())\n\n\ndef read_ints():\n return list(map(int, input().strip().split(' ')))\n\n\ndef solve():\n K, S = read_ints()\n total = 0\n for X in range(K + 1):\n if S - X < 0:\n break\n Y_min = max(S - X - K, 0)\n Y_max = min(S - X, K)\n if Y_min <= Y_max:\n total += Y_max - Y_min + 1\n return total\n\n\nif __name__ == '__main__':\n print(solve())\n", "step-5": "\n\ndef read_int():\n return int(input().strip())\n\n\ndef read_ints():\n return list(map(int, input().strip().split(' ')))\n\n\ndef solve():\n K, S = read_ints()\n # X+Y+Z = S\n # 0 <= X,Y,Z <= K\n total = 0\n for X in range(K+1):\n if S-X < 0:\n break\n # Y+Z=S-X\n Y_min = max(S-X-K, 0)\n Y_max = min(S-X, K)\n if Y_min <= Y_max:\n total += Y_max-Y_min+1\n return total\n\n\nif __name__ == '__main__':\n print(solve())\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
# USAGE
# python predict_video.py --model model/activity.model --label-bin model/lb.pickle --input example_clips/lifting.mp4 --output output/lifting_128avg.avi --size 128
# python predict_video.py --model model/road_activity.model --label-bin model/rd.pickle --input example_clips/fire_footage.mp4 --output output/fire_footage2.avi --size 128

# import the necessary packages
from tensorflow.keras.models import load_model
from collections import deque
import numpy as np
import argparse
from mail import sendmail
import pickle
import imutils
import cv2
import datetime
import time
from flask import Flask, render_template, request


app = Flask(__name__)
@app.route('/')
def index():
    return render_template('Main_page.html')

@app.route('/prediction.html')
def predict():
    return render_template('prediction.html')

@app.route('/About_us.html')
def about_us():
    return render_template('About_us.html')

@app.route('/Result1.html', methods=['POST'])
def Result1():
    global annotation
    if request.method == 'POST':
        MODEL_PATH = 'model/final.model'
        PICKLE_PATH = 'model/final.pickle'
        #MODEL_PATH = 'model/real_time.model'
        #PICKLE_PATH = 'model/real_time.pickle'
        INPUT_VIDEO = request.form['inp_video']
        out = INPUT_VIDEO.split('.')
        INPUT_VIDEO = 'example_clips/'+request.form['inp_video']
        out = out[0]
        OUTPUT_VIDEO = 'output/' + out + '.avi'
        SIZE = 128

        print(MODEL_PATH,PICKLE_PATH,INPUT_VIDEO,OUTPUT_VIDEO,SIZE)
        #load the trained model and label binarizer from disk
        print("[INFO] loading model and label binarizer...")
        model = load_model(MODEL_PATH)
        lb = pickle.loads(open(PICKLE_PATH, "rb").read())

        # initialize the image mean for mean subtraction along with the
        # predictions queue
        mean = np.array([123.68, 116.779, 103.939][::1], dtype="float32")
        Q = deque(maxlen=SIZE)

        # initialize the video stream, pointer to output video file, and
        # frame dimensions
        vs = cv2.VideoCapture(INPUT_VIDEO)
        #vs = cv2.VideoCapture(0)
        writer = None
        (W, H) = (None, None)

        count = 0.0
        flag = 0
        start_frame = 0
        end_frame = 0
        status = {}
        annotation = ""
        que = deque()
        # loop over frames from the video file stream
        while True:
            # read the next frame from the file
            (grabbed, frame) = vs.read()
            count += 1.0
            # if the frame was not grabbed, then we have reached the end
            # of the stream
            if not grabbed:
                break

            # if the frame dimensions are empty, grab them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # clone the output frame, then convert it from BGR to RGB
            # ordering, resize the frame to a fixed 224x224, and then
            # perform mean subtraction
            output = frame.copy()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = cv2.resize(frame, (224, 224)).astype("float32")
            frame -= mean

            # make predictions on the frame and then update the predictions
            # queue
            preds = model.predict(np.expand_dims(frame, axis=0))[0]
            Q.append(preds)
            # perform prediction averaging over the current history of
            # previous predictions
            results = np.array(Q).mean(axis=0)
            i = np.argmax(results)
            label = lb.classes_[i]
            # keep a rolling window of the most recent 30 frame labels
            if len(que) == 30:
                que.popleft()
            if len(que) != 30:
                que.append(label)
            noOfAlerts = que.count("fire") + que.count("accident")
            if que.count("fire") > que.count("accident"):
                caseDetect = "fire"
            else:
                caseDetect = "accident"
            # draw the activity on the output frame
            text = "Alert!!: {}".format(label)

            # Changes start here
            alert = ["fire", "accident"]

            #currentFrame = 0
            #print(label, flag)
            if len(que) == 30:
                if caseDetect in alert and noOfAlerts > 20:
                    cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)
                    if flag == 0:
                        annotation = caseDetect
                        start_frame = count - 20
                        flag = 1
                else:
                    if flag == 1:
                        end_frame = count - 10
                        flag = 2

            #name = './frame/frame'+str(currentFrame)+'.jpg'
            #cv2.imwrite(name,output)

            # check if the video writer is None
            if writer is None:
                # initialize our video writer
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True)

            # write the output frame to disk
            writer.write(output)

            # show the output image
            cv2.imshow("Output", output)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        # changes made here

        if annotation != "":
            status = sendmail("harshpatel682@gmail.com", "Anomaly Detected!!!", "yes")
            status = status['email_status']

        #total_time = end_time - start_time
        #print("Time is: {}".format(str(datetime.timedelta(seconds=(total_time)))))
        print("count: {}".format(count))
        #print("Frame count: {}".format(f_start))
        # release the file pointers
        print("[INFO] cleaning up...")
        writer.release()
        vs.release()
        start_frame = start_frame//30
        end_frame = end_frame // 30
        if flag == 1:
            end_frame = count
            end_frame = end_frame // 30
            flag = 2
        print(start_frame, end_frame)
    return render_template('Result1.html', label=annotation, count=count, start_time=start_frame, end_time=end_frame,
        status = status)


if __name__ == '__main__':
    app.run(debug=False)
normal
{ "blob_id": "ccfcc5b644d592090786ceb35a85124c9d3275ad", "index": 5719, "step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('Main_page.html')\n\n\n@app.route('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\n@app.route('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\n@app.route('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('harshpatel682@gmail.com',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('Main_page.html')\n\n\n@app.route('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\n@app.route('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\n@app.route('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 
'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('harshpatel682@gmail.com',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('Main_page.html')\n\n\n@app.route('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\n@app.route('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\n@app.route('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 
103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('harshpatel682@gmail.com',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n", "step-4": "from tensorflow.keras.models import load_model\nfrom collections import deque\nimport numpy as np\nimport argparse\nfrom mail import sendmail\nimport pickle\nimport imutils\nimport cv2\nimport datetime\nimport time\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('Main_page.html')\n\n\n@app.route('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\n@app.route('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\n@app.route('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = 
''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('harshpatel682@gmail.com',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n", "step-5": "# USAGE\n# python predict_video.py --model model/activity.model --label-bin model/lb.pickle --input example_clips/lifting.mp4 --output output/lifting_128avg.avi --size 128\n# python predict_video.py --model model/road_activity.model --label-bin model/rd.pickle --input example_clips/fire_footage.mp4 --ou\n# tput output/fire_footage2.avi --size 128\n\n# import the necessary packages\nfrom tensorflow.keras.models import load_model\nfrom collections import deque\nimport numpy as np\nimport argparse\nfrom mail import sendmail\nimport pickle\nimport imutils\nimport cv2\nimport datetime\nimport time\nfrom flask import Flask, render_template, request\n\n\napp = Flask(__name__)\n@app.route('/')\ndef index():\n return render_template('Main_page.html')\n\n@app.route('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n@app.route('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n@app.route('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n #MODEL_PATH = 'model/real_time.model'\n #PICKLE_PATH = 'model/real_time.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/'+request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n\n print(MODEL_PATH,PICKLE_PATH,INPUT_VIDEO,OUTPUT_VIDEO,SIZE)\n #load the trained model and label binarizer from disk\n print(\"[INFO] loading model and label binarizer...\")\n model = load_model(MODEL_PATH)\n lb = 
pickle.loads(open(PICKLE_PATH, \"rb\").read())\n\n # initialize the image mean for mean subtraction along with the\n # predictions queue\n mean = np.array([123.68, 116.779, 103.939][::1], dtype=\"float32\")\n Q = deque(maxlen=SIZE)\n\n # initialize the video stream, pointer to output video file, and\n # frame dimensions\n vs = cv2.VideoCapture(INPUT_VIDEO)\n #vs = cv2.VideoCapture(0)\n writer = None\n (W, H) = (None, None)\n\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = \"\"\n que = deque()\n # loop over frames from the video file stream\n while True:\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n count += 1.0\n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n\n # if the frame dimensions are empty, grab them\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n # clone the output frame, then convert it from BGR to RGB\n # ordering, resize the frame to a fixed 224x224, and then\n # perform mean subtraction\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype(\"float32\")\n frame -= mean\n\n # make predictions on the frame and then update the predictions\n # queue\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n # perform prediction averaging over the current history of\n # previous predictions\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count(\"fire\") + que.count(\"accident\")\n if que.count(\"fire\") > que.count(\"accident\"):\n caseDetect = \"fire\"\n else:\n caseDetect = \"accident\"\n # draw the activity on the output frame\n text = \"Alert!!: {}\".format(label)\n\n # Changes starts here\n alert = [\"fire\", \"accident\"]\n\n #currentFrame = 0\n #print(label, flag)\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,\n 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n else:\n if flag == 1:\n end_frame = count - 10\n flag = 2\n\n #name = './frame/frame'+str(currentFrame)+'.jpg'\n #cv2.imwrite(name,output)\n\n # check if the video writer is None\n if writer is None:\n # initialize our video writer\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30,\n (W, H), True)\n\n # write the output frame to disk\n writer.write(output)\n\n # show the output image\n cv2.imshow(\"Output\", output)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n # changes made here\n\n if annotation != \"\":\n status = sendmail(\"harshpatel682@gmail.com\", \"Anomaly Detected!!!\", \"yes\")\n status = status['email_status']\n\n #total_time = end_time - start_time\n #print(\"Time is: {}\".format(str(datetime.timedelta(seconds=(total_time)))))\n print(\"count: {}\".format(count))\n #print(\"Frame count: {}\".format(f_start))\n # release the file pointers\n print(\"[INFO] cleaning up...\")\n writer.release()\n vs.release()\n start_frame = start_frame//30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count, start_time=start_frame, end_time=end_frame,\n status = 
status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
import itertools import unittest from pylev3 import Levenshtein TEST_DATA = [ ('classic', "kitten", "sitting", 3), ('same', "kitten", "kitten", 0), ('empty', "", "", 0), ('a', "meilenstein", "levenshtein", 4), ('b', "levenshtein", "frankenstein", 6), ('c', "confide", "deceit", 6), ('d', "CUNsperrICY", "conspiracy", 8), ] TEST_FUNCTIONS = [ # Levenshtein().classic, # too slow Levenshtein().recursive, Levenshtein().wf, Levenshtein().wfi, Levenshtein().damerau ] class Tests(unittest.TestCase): def test_singleton(self): lev1, lev2 = Levenshtein(), Levenshtein() self.assertIs(lev1, lev2) def _mk_test_fn(fn, a, b, expected): def _test_fn(self): self.assertEqual(fn(a, b), expected) self.assertEqual(fn(b, a), expected) return _test_fn for lev_fn, data in itertools.product(TEST_FUNCTIONS, TEST_DATA): name, a, b, expected = data test_fn = _mk_test_fn(lev_fn, a, b, expected) setattr(Tests, "test_{}_{}".format(name, lev_fn.__name__), test_fn) if __name__ == '__main__': unittest.main()
normal
{ "blob_id": "892d6662e4276f96797c9654d15c96a608d0835a", "index": 8927, "step-1": "<mask token>\n\n\nclass Tests(unittest.TestCase):\n\n def test_singleton(self):\n lev1, lev2 = Levenshtein(), Levenshtein()\n self.assertIs(lev1, lev2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Tests(unittest.TestCase):\n\n def test_singleton(self):\n lev1, lev2 = Levenshtein(), Levenshtein()\n self.assertIs(lev1, lev2)\n\n\ndef _mk_test_fn(fn, a, b, expected):\n\n def _test_fn(self):\n self.assertEqual(fn(a, b), expected)\n self.assertEqual(fn(b, a), expected)\n return _test_fn\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Tests(unittest.TestCase):\n\n def test_singleton(self):\n lev1, lev2 = Levenshtein(), Levenshtein()\n self.assertIs(lev1, lev2)\n\n\ndef _mk_test_fn(fn, a, b, expected):\n\n def _test_fn(self):\n self.assertEqual(fn(a, b), expected)\n self.assertEqual(fn(b, a), expected)\n return _test_fn\n\n\nfor lev_fn, data in itertools.product(TEST_FUNCTIONS, TEST_DATA):\n name, a, b, expected = data\n test_fn = _mk_test_fn(lev_fn, a, b, expected)\n setattr(Tests, 'test_{}_{}'.format(name, lev_fn.__name__), test_fn)\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "<mask token>\nTEST_DATA = [('classic', 'kitten', 'sitting', 3), ('same', 'kitten',\n 'kitten', 0), ('empty', '', '', 0), ('a', 'meilenstein', 'levenshtein',\n 4), ('b', 'levenshtein', 'frankenstein', 6), ('c', 'confide', 'deceit',\n 6), ('d', 'CUNsperrICY', 'conspiracy', 8)]\nTEST_FUNCTIONS = [Levenshtein().recursive, Levenshtein().wf, Levenshtein().\n wfi, Levenshtein().damerau]\n\n\nclass Tests(unittest.TestCase):\n\n def test_singleton(self):\n lev1, lev2 = Levenshtein(), Levenshtein()\n self.assertIs(lev1, lev2)\n\n\ndef _mk_test_fn(fn, a, b, expected):\n\n def _test_fn(self):\n self.assertEqual(fn(a, b), expected)\n self.assertEqual(fn(b, a), expected)\n return _test_fn\n\n\nfor lev_fn, data in itertools.product(TEST_FUNCTIONS, TEST_DATA):\n name, a, b, expected = data\n test_fn = _mk_test_fn(lev_fn, a, b, expected)\n setattr(Tests, 'test_{}_{}'.format(name, lev_fn.__name__), test_fn)\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "import itertools\nimport unittest\n\nfrom pylev3 import Levenshtein\n\n\nTEST_DATA = [\n ('classic', \"kitten\", \"sitting\", 3),\n ('same', \"kitten\", \"kitten\", 0),\n ('empty', \"\", \"\", 0),\n ('a', \"meilenstein\", \"levenshtein\", 4),\n ('b', \"levenshtein\", \"frankenstein\", 6),\n ('c', \"confide\", \"deceit\", 6),\n ('d', \"CUNsperrICY\", \"conspiracy\", 8),\n]\n\nTEST_FUNCTIONS = [\n # Levenshtein().classic, # too slow\n Levenshtein().recursive,\n Levenshtein().wf,\n Levenshtein().wfi,\n Levenshtein().damerau\n]\n\n\nclass Tests(unittest.TestCase):\n def test_singleton(self):\n lev1, lev2 = Levenshtein(), Levenshtein()\n self.assertIs(lev1, lev2)\n\n\ndef _mk_test_fn(fn, a, b, expected):\n def _test_fn(self):\n self.assertEqual(fn(a, b), expected)\n self.assertEqual(fn(b, a), expected)\n return _test_fn\n\n\nfor lev_fn, data in itertools.product(TEST_FUNCTIONS, TEST_DATA):\n name, a, b, expected = data\n test_fn = _mk_test_fn(lev_fn, a, b, expected)\n setattr(Tests, \"test_{}_{}\".format(name, lev_fn.__name__), test_fn)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 2, 3, 4, 5, 7 ] }
[ 2, 3, 4, 5, 7 ]
import torch.nn as nn
from torch.autograd import Variable
import torch
import string

all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)

#Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
    return all_letters.find(letter)

#Only for demonstration
def letterToTensor(letter):
    tensor = torch.zeros(1, n_letters)
    tensor[0][letterToIndex(letter)] = 1
    return tensor

#Turn a line into a tensor
#or an array of one-hot letter vectors

def lineToTensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li, letter in enumerate(line):
        tensor[li][0][letterToIndex(letter)] = 1
    return tensor

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()

        self.hidden_size = hidden_size

        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax()

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return Variable(torch.zeros(1, self.hidden_size))

all_categories = ['Medical Term', 'Common English Term']

n_hidden = 128
n_categories = 2

rnn = RNN(n_letters, n_hidden, n_categories)
rnn.load_state_dict(torch.load('medicalTermsModel'))

# Just return an output given a line
def evaluate(line_tensor):
    hidden = rnn.initHidden()

    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)

    return output

def predict(input_line, n_predictions=1):

    output = evaluate(Variable(lineToTensor(input_line)))

    # Get top N categories
    topv, topi = output.data.topk(n_predictions, 1, True)
    predictions = []

    for i in range(n_predictions):
        value = topv[0][i]
        category_index = topi[0][i]
        predictions.append([value, all_categories[category_index]])
        if category_index == 0:
            # print('\n> %s' % input_line)
            predictions = (str(input_line), str(all_categories[category_index]))
        else:
            predictions = (str(input_line), str(all_categories[category_index]))
    return predictions
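And an end-to-end usage sketch, assuming the trained weights file 'medicalTermsModel' loaded above is actually present; the input word is purely illustrative:

word, label = predict('ibuprofen')
print(word, label)  # the label depends entirely on the trained weights
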
#!/usr/bin/env python

import os
import re

from codecs import open

from setuptools import find_packages, setup

here = os.path.abspath(os.path.dirname(__file__))


def get_changelog():
    with open(os.path.join(here, 'CHANGELOG'), encoding='utf-8') as f:
        text = f.read()
    header_matches = list(re.finditer('^=+$', text, re.MULTILINE))
    # keep everything up to the sixth '=' underline, i.e. the five most
    # recent releases
    text = text[:header_matches[5].start()]
    lines = text.splitlines()[:-1]  # drop the dangling sixth release number
    return '=========\nChangelog\n=========\n\n' + '\n'.join(lines)


about = {}

with open(os.path.join(here, 'devpi_server', '__version__.py'), 'r', 'utf-8') as f:
    exec(f.read(), about)

with open('README.rst', encoding='utf-8') as f:
    README = f.read()

CHANGELOG = get_changelog()

requires = [
    'py>=1.4.23',
    'appdirs',
    'devpi_common<4,>=3.3.0',
    'itsdangerous>=0.24',
    'execnet>=1.2',
    'pyramid>=1.8',
    'waitress>=1.0.1',
    'repoze.lru>=0.6',
    'passlib[argon2]',
    'pluggy>=0.3.0,<1.0',
    'strictyaml',
    ]
extras_require = {}

setup(
    name=about['__title__'],
    description=about['__description__'],
    keywords='pypi realtime cache server',
    long_description="\n\n".join([README, CHANGELOG]),
    url=about['__url__'],
    version=about['__version__'],
    maintainer=about['__maintainer__'],
    maintainer_email=about['__maintainer_email__'],
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    license=about['__license__'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        ],
    install_requires=requires,
    extras_require=extras_require,
    entry_points={
        'console_scripts': [
            'devpi-server = devpi_server.main:main'],
        'devpi_server': [
            'devpi-server-auth-basic = devpi_server.auth_basic',
            'devpi-server-auth-devpi = devpi_server.auth_devpi',
            'devpi-server-sqlite = devpi_server.keyfs_sqlite',
            'devpi-server-sqlite-fs = devpi_server.keyfs_sqlite_fs'],
        'devpi_web': [
            'devpi-server-status = devpi_server.views'],
        'pytest11': [
            'pytest_devpi_server = pytest_devpi_server'],
        },
    )
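
To see what the slicing in get_changelog does, here is a small self-contained sketch on an invented toy changelog:

import re

# Toy changelog: six releases, each a version line plus an '=' underline.
toy = ''.join('4.%d.0\n=====\nsome fixes\n' % i for i in range(6))
matches = list(re.finditer('^=+$', toy, re.MULTILINE))
text = toy[:matches[5].start()]   # cut at the sixth underline
lines = text.splitlines()[:-1]    # drop the dangling '4.5.0' version line
print('\n'.join(lines))           # only the first five releases remain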
import matplotlib.pyplot as plt
import cv2

# IMREAD_GRAYSCALE = 0
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)
# IMREAD_COLOR = 1
# IMREAD_UNCHANGED = -1

cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.imwrite('watchgray.png', img)

plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.show()
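
One related pitfall worth a sketch: the grayscale round-trip above is safe, but cv2.imread returns color images in BGR order while matplotlib expects RGB, so color images need a conversion (same hypothetical test.jpg):

import matplotlib.pyplot as plt
import cv2

bgr = cv2.imread('test.jpg', cv2.IMREAD_COLOR)  # OpenCV loads channels as BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)      # reorder for matplotlib
plt.imshow(rgb)                                 # no cmap needed for color
plt.show()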
import unittest
import TicTacToe


class pVpTestCase(unittest.TestCase):
    """Player-versus-player games driven by scripted move lists."""

    def twoplayer_setup(self):
        game1 = TicTacToe.Game()
        player1 = TicTacToe.Player('X', game1)
        player2 = TicTacToe.Player('O', game1)
        return (game1, player1, player2)

    # Player 1 wins
    def test_mock_game1(self):
        game1, player1, player2 = self.twoplayer_setup()
        p1moves = ['top left', 'middle', 'bottom right']
        p2moves = ['top middle', 'bottom left', 'top right']
        winner = game1.play_test(player1, player2, p1moves, p2moves)
        self.assertEqual(player1, winner)

    # Player 2 wins
    def test_mock_game2(self):
        game1, player1, player2 = self.twoplayer_setup()
        p1moves = ['top right', 'middle', 'bottom right']
        p2moves = ['top left', 'middle left', 'bottom left']
        winner = game1.play_test(player1, player2, p1moves, p2moves)
        self.assertEqual(player2, winner)

    # Draw
    def test_mock_game3(self):
        game1, player1, player2 = self.twoplayer_setup()
        p1moves = ['top right', 'middle top', 'middle', 'bottom right', 'middle left']
        p2moves = ['top left', 'middle right', 'bottom left', 'bottom middle']
        winner = game1.play_test(player1, player2, p1moves, p2moves)
        self.assertEqual(None, winner)


class CvPTestCase(unittest.TestCase):
    """Computer-versus-player games checking the computer's move priorities:
    complete a winning row, block the opponent, take the centre, then take
    the corner opposite the opponent."""

    def onecompplayer_setup(self):
        game1 = TicTacToe.Game()
        computer1 = TicTacToe.Computer('X', game1)
        player2 = TicTacToe.Player('O', game1)
        return (game1, computer1, player2)

    def test_place3(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "X", 2: "X", 3: "-",
                            4: "-", 5: "-", 6: "-",
                            7: "-", 8: "-", 9: "-"}
        p2moves = []
        winner = game1.play_comp_test(computer1, player2, p2moves)
        self.assertEqual(computer1, winner)
        self.assertEqual({1: "X", 2: "X", 3: "X",
                          4: "-", 5: "-", 6: "-",
                          7: "-", 8: "-", 9: "-"},
                         game1.game_board)

    def test_place2(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "-", 5: "X", 6: "-",
                            7: "-", 8: "X", 9: "-"}
        p2moves = []
        winner = game1.play_comp_test(computer1, player2, p2moves)
        self.assertEqual(computer1, winner)
        self.assertEqual({1: "-", 2: "X", 3: "-",
                          4: "-", 5: "X", 6: "-",
                          7: "-", 8: "X", 9: "-"},
                         game1.game_board)

    def test_place8(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "-", 5: "-", 6: "-",
                            7: "X", 8: "-", 9: "X"}
        p2moves = []
        winner = game1.play_comp_test(computer1, player2, p2moves)
        self.assertEqual(computer1, winner)
        self.assertEqual({1: "-", 2: "-", 3: "-",
                          4: "-", 5: "-", 6: "-",
                          7: "X", 8: "X", 9: "X"},
                         game1.game_board)

    def test_block5(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "O", 5: "-", 6: "O",
                            7: "-", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "-", 2: "-", 3: "-",
                          4: "O", 5: "X", 6: "O",
                          7: "-", 8: "-", 9: "-"},
                         game1.game_board)

    def test_block7(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "O",
                            4: "-", 5: "O", 6: "-",
                            7: "-", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "-", 2: "-", 3: "O",
                          4: "-", 5: "O", 6: "-",
                          7: "X", 8: "-", 9: "-"},
                         game1.game_board)

    def test_block3(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "-", 5: "-", 6: "O",
                            7: "-", 8: "-", 9: "O"}
        computer1.auto_move()
        self.assertEqual({1: "-", 2: "-", 3: "X",
                          4: "-", 5: "-", 6: "O",
                          7: "-", 8: "-", 9: "O"},
                         game1.game_board)

    def test_center_empty(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "-", 5: "-", 6: "-",
                            7: "-", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "-", 2: "-", 3: "-",
                          4: "-", 5: "X", 6: "-",
                          7: "-", 8: "-", 9: "-"},
                         game1.game_board)

    def test_center_nonempty(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "O", 2: "-", 3: "-",
                            4: "X", 5: "-", 6: "O",
                            7: "X", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "O", 2: "-", 3: "-",
                          4: "X", 5: "X", 6: "O",
                          7: "X", 8: "-", 9: "-"},
                         game1.game_board)

    def test_oppcorner7(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "O",
                            4: "-", 5: "X", 6: "-",
                            7: "-", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "-", 2: "-", 3: "O",
                          4: "-", 5: "X", 6: "-",
                          7: "X", 8: "-", 9: "-"},
                         game1.game_board)

    def test_oppcorner1(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "O", 5: "X", 6: "X",
                            7: "-", 8: "-", 9: "O"}
        computer1.auto_move()
        self.assertEqual({1: "X", 2: "-", 3: "-",
                          4: "O", 5: "X", 6: "X",
                          7: "-", 8: "-", 9: "O"},
                         game1.game_board)

    def test_oppcorner3(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "-", 2: "-", 3: "-",
                            4: "-", 5: "X", 6: "-",
                            7: "O", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "-", 2: "-", 3: "X",
                          4: "-", 5: "X", 6: "-",
                          7: "O", 8: "-", 9: "-"},
                         game1.game_board)

    def test_oppcorner9(self):
        game1, computer1, player2 = self.onecompplayer_setup()
        game1.game_board = {1: "O", 2: "-", 3: "-",
                            4: "-", 5: "X", 6: "-",
                            7: "-", 8: "-", 9: "-"}
        computer1.auto_move()
        self.assertEqual({1: "O", 2: "-", 3: "-",
                          4: "-", 5: "X", 6: "-",
                          7: "-", 8: "-", 9: "X"},
                         game1.game_board)


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python3 # coding=utf-8 import fire import json import os import time import requests import time import hashlib import random root_path, file_name = os.path.split(os.path.realpath(__file__)) ip_list_path = ''.join([root_path, os.path.sep, 'ip_list.json']) class ProxySwift(object): server_id = '1' def requerst_get(self, url, data, *p, **kwargs): SecretKey = '3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j' PartnerID = '2017061217350058' TimeStamp = int(time.time()) source_data = { 'partner_id': PartnerID, 'timestamp': TimeStamp } source_data.update(data) tmp_data = [i for i in source_data.items()] tmp_data = sorted(tmp_data, key=lambda i: i[0]) url_list = ['{}{}'.format(*i) for i in tmp_data] # url_list.reverse() # sign = ''.join(url_list) # sign = ''.join(sorted(sign)) sign = ''.join(url_list) # sign = ''.join(sorted(sign)) data = sign + SecretKey md_5 = hashlib.md5() md_5.update(data.encode("utf-8")) sign = md_5.hexdigest() source_data.update({'sign': sign}) return requests.get(url, params=source_data, verify=False, *p, **kwargs) def get_ip(self, interface_id='', pool_id=''): url = 'https://api.proxyswift.com/ip/get' data = { 'server_id': self.server_id, 'pool_id': pool_id, 'interface_id': interface_id, } r = self.requerst_get(url, data) response = r.json() return response def get_task(self, task_id): url = 'https://api.proxyswift.com/task/get' data = {'task_id': task_id} r = self.requerst_get(url, data) return r.json() def changes_ip(self, interface_id, filter=24): url = 'https://api.proxyswift.com/ip/change' data = { 'server_id': self.server_id, 'interface_id': interface_id, 'filter': filter, } r = self.requerst_get(url, data) task_id = r.json()['taskId'] #status = self(task_id)['status'] i = 1 while True: time.sleep(i%2+1) status = self.get_task(task_id)['status'] if status == 'success': ip_port = self.get_ip(interface_id) return ip_port class ProxyPool(object): def __init__(self, proxyswift=ProxySwift(), interval=4): self.interval = interval self.ps = proxyswift self.count = 0 self.index = 0 with open(ip_list_path, 'r', encoding='utf-8') as f: self.pool = json.loads(f.read()) def get(self): # 从 pool中随机取一个ip with open(ip_list_path, 'r', encoding='utf-8') as f: self.pool = json.loads(f.read()) ip = random.choice(self.pool) ip = "{0}:{1}".format(ip['ip'], ip['port']) print(ip) return ip def change_ip(self, proxy_server): for ip in self.pool: if proxy_server == "http://%(ip)s:%(port)s" % ip: self.pool.pop(0) self.ps.changes_ip(ip['id']) self.pool = self.ps.get_ip() time.sleep(1) break self.refresh_ip() def refresh_ip(self): time.sleep(5) self.pool = self.ps.get_ip() print(self.pool) # os.environ['ip_list'] = json.dumps(self.ps.get_ip()) with open(ip_list_path, 'w', encoding='utf-8') as f: f.write(json.dumps(self.ps.get_ip())) def main(): fire.Fire(ProxyPool) if __name__ == '__main__': main()
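
# Illustrative CLI usage: fire.Fire(ProxyPool) exposes the class's public
# methods as subcommands (the script name below is hypothetical):
#
#     python proxyswift_pool.py get          # pick a random proxy from ip_list.json
#     python proxyswift_pool.py refresh_ip   # re-fetch the pool from the API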
normal
{ "blob_id": "0ff96b2314927d7b3e763242e554fd561f3c9343", "index": 5872, "step-1": "<mask token>\n\n\nclass ProxySwift(object):\n <mask token>\n\n def requerst_get(self, url, data, *p, **kwargs):\n SecretKey = '3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j'\n PartnerID = '2017061217350058'\n TimeStamp = int(time.time())\n source_data = {'partner_id': PartnerID, 'timestamp': TimeStamp}\n source_data.update(data)\n tmp_data = [i for i in source_data.items()]\n tmp_data = sorted(tmp_data, key=lambda i: i[0])\n url_list = ['{}{}'.format(*i) for i in tmp_data]\n sign = ''.join(url_list)\n data = sign + SecretKey\n md_5 = hashlib.md5()\n md_5.update(data.encode('utf-8'))\n sign = md_5.hexdigest()\n source_data.update({'sign': sign})\n return requests.get(url, *p, params=source_data, verify=False, **kwargs\n )\n\n def get_ip(self, interface_id='', pool_id=''):\n url = 'https://api.proxyswift.com/ip/get'\n data = {'server_id': self.server_id, 'pool_id': pool_id,\n 'interface_id': interface_id}\n r = self.requerst_get(url, data)\n response = r.json()\n return response\n <mask token>\n\n def changes_ip(self, interface_id, filter=24):\n url = 'https://api.proxyswift.com/ip/change'\n data = {'server_id': self.server_id, 'interface_id': interface_id,\n 'filter': filter}\n r = self.requerst_get(url, data)\n task_id = r.json()['taskId']\n i = 1\n while True:\n time.sleep(i % 2 + 1)\n status = self.get_task(task_id)['status']\n if status == 'success':\n ip_port = self.get_ip(interface_id)\n return ip_port\n\n\nclass ProxyPool(object):\n\n def __init__(self, proxyswift=ProxySwift(), interval=4):\n self.interval = interval\n self.ps = proxyswift\n self.count = 0\n self.index = 0\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n\n def get(self):\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n ip = random.choice(self.pool)\n ip = '{0}:{1}'.format(ip['ip'], ip['port'])\n print(ip)\n return ip\n\n def change_ip(self, proxy_server):\n for ip in self.pool:\n if proxy_server == 'http://%(ip)s:%(port)s' % ip:\n self.pool.pop(0)\n self.ps.changes_ip(ip['id'])\n self.pool = self.ps.get_ip()\n time.sleep(1)\n break\n self.refresh_ip()\n\n def refresh_ip(self):\n time.sleep(5)\n self.pool = self.ps.get_ip()\n print(self.pool)\n with open(ip_list_path, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.ps.get_ip()))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ProxySwift(object):\n <mask token>\n\n def requerst_get(self, url, data, *p, **kwargs):\n SecretKey = '3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j'\n PartnerID = '2017061217350058'\n TimeStamp = int(time.time())\n source_data = {'partner_id': PartnerID, 'timestamp': TimeStamp}\n source_data.update(data)\n tmp_data = [i for i in source_data.items()]\n tmp_data = sorted(tmp_data, key=lambda i: i[0])\n url_list = ['{}{}'.format(*i) for i in tmp_data]\n sign = ''.join(url_list)\n data = sign + SecretKey\n md_5 = hashlib.md5()\n md_5.update(data.encode('utf-8'))\n sign = md_5.hexdigest()\n source_data.update({'sign': sign})\n return requests.get(url, *p, params=source_data, verify=False, **kwargs\n )\n\n def get_ip(self, interface_id='', pool_id=''):\n url = 'https://api.proxyswift.com/ip/get'\n data = {'server_id': self.server_id, 'pool_id': pool_id,\n 'interface_id': interface_id}\n r = self.requerst_get(url, data)\n response = r.json()\n return response\n\n def get_task(self, task_id):\n url = 'https://api.proxyswift.com/task/get'\n data = {'task_id': task_id}\n r = self.requerst_get(url, 
data)\n return r.json()\n\n def changes_ip(self, interface_id, filter=24):\n url = 'https://api.proxyswift.com/ip/change'\n data = {'server_id': self.server_id, 'interface_id': interface_id,\n 'filter': filter}\n r = self.requerst_get(url, data)\n task_id = r.json()['taskId']\n i = 1\n while True:\n time.sleep(i % 2 + 1)\n status = self.get_task(task_id)['status']\n if status == 'success':\n ip_port = self.get_ip(interface_id)\n return ip_port\n\n\nclass ProxyPool(object):\n\n def __init__(self, proxyswift=ProxySwift(), interval=4):\n self.interval = interval\n self.ps = proxyswift\n self.count = 0\n self.index = 0\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n\n def get(self):\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n ip = random.choice(self.pool)\n ip = '{0}:{1}'.format(ip['ip'], ip['port'])\n print(ip)\n return ip\n\n def change_ip(self, proxy_server):\n for ip in self.pool:\n if proxy_server == 'http://%(ip)s:%(port)s' % ip:\n self.pool.pop(0)\n self.ps.changes_ip(ip['id'])\n self.pool = self.ps.get_ip()\n time.sleep(1)\n break\n self.refresh_ip()\n\n def refresh_ip(self):\n time.sleep(5)\n self.pool = self.ps.get_ip()\n print(self.pool)\n with open(ip_list_path, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.ps.get_ip()))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ProxySwift(object):\n server_id = '1'\n\n def requerst_get(self, url, data, *p, **kwargs):\n SecretKey = '3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j'\n PartnerID = '2017061217350058'\n TimeStamp = int(time.time())\n source_data = {'partner_id': PartnerID, 'timestamp': TimeStamp}\n source_data.update(data)\n tmp_data = [i for i in source_data.items()]\n tmp_data = sorted(tmp_data, key=lambda i: i[0])\n url_list = ['{}{}'.format(*i) for i in tmp_data]\n sign = ''.join(url_list)\n data = sign + SecretKey\n md_5 = hashlib.md5()\n md_5.update(data.encode('utf-8'))\n sign = md_5.hexdigest()\n source_data.update({'sign': sign})\n return requests.get(url, *p, params=source_data, verify=False, **kwargs\n )\n\n def get_ip(self, interface_id='', pool_id=''):\n url = 'https://api.proxyswift.com/ip/get'\n data = {'server_id': self.server_id, 'pool_id': pool_id,\n 'interface_id': interface_id}\n r = self.requerst_get(url, data)\n response = r.json()\n return response\n\n def get_task(self, task_id):\n url = 'https://api.proxyswift.com/task/get'\n data = {'task_id': task_id}\n r = self.requerst_get(url, data)\n return r.json()\n\n def changes_ip(self, interface_id, filter=24):\n url = 'https://api.proxyswift.com/ip/change'\n data = {'server_id': self.server_id, 'interface_id': interface_id,\n 'filter': filter}\n r = self.requerst_get(url, data)\n task_id = r.json()['taskId']\n i = 1\n while True:\n time.sleep(i % 2 + 1)\n status = self.get_task(task_id)['status']\n if status == 'success':\n ip_port = self.get_ip(interface_id)\n return ip_port\n\n\nclass ProxyPool(object):\n\n def __init__(self, proxyswift=ProxySwift(), interval=4):\n self.interval = interval\n self.ps = proxyswift\n self.count = 0\n self.index = 0\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n\n def get(self):\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n ip = random.choice(self.pool)\n ip = '{0}:{1}'.format(ip['ip'], ip['port'])\n print(ip)\n return ip\n\n def change_ip(self, proxy_server):\n for ip in self.pool:\n if proxy_server == 'http://%(ip)s:%(port)s' % ip:\n 
self.pool.pop(0)\n self.ps.changes_ip(ip['id'])\n self.pool = self.ps.get_ip()\n time.sleep(1)\n break\n self.refresh_ip()\n\n def refresh_ip(self):\n time.sleep(5)\n self.pool = self.ps.get_ip()\n print(self.pool)\n with open(ip_list_path, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.ps.get_ip()))\n\n\ndef main():\n fire.Fire(ProxyPool)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nroot_path, file_name = os.path.split(os.path.realpath(__file__))\nip_list_path = ''.join([root_path, os.path.sep, 'ip_list.json'])\n\n\nclass ProxySwift(object):\n server_id = '1'\n\n def requerst_get(self, url, data, *p, **kwargs):\n SecretKey = '3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j'\n PartnerID = '2017061217350058'\n TimeStamp = int(time.time())\n source_data = {'partner_id': PartnerID, 'timestamp': TimeStamp}\n source_data.update(data)\n tmp_data = [i for i in source_data.items()]\n tmp_data = sorted(tmp_data, key=lambda i: i[0])\n url_list = ['{}{}'.format(*i) for i in tmp_data]\n sign = ''.join(url_list)\n data = sign + SecretKey\n md_5 = hashlib.md5()\n md_5.update(data.encode('utf-8'))\n sign = md_5.hexdigest()\n source_data.update({'sign': sign})\n return requests.get(url, *p, params=source_data, verify=False, **kwargs\n )\n\n def get_ip(self, interface_id='', pool_id=''):\n url = 'https://api.proxyswift.com/ip/get'\n data = {'server_id': self.server_id, 'pool_id': pool_id,\n 'interface_id': interface_id}\n r = self.requerst_get(url, data)\n response = r.json()\n return response\n\n def get_task(self, task_id):\n url = 'https://api.proxyswift.com/task/get'\n data = {'task_id': task_id}\n r = self.requerst_get(url, data)\n return r.json()\n\n def changes_ip(self, interface_id, filter=24):\n url = 'https://api.proxyswift.com/ip/change'\n data = {'server_id': self.server_id, 'interface_id': interface_id,\n 'filter': filter}\n r = self.requerst_get(url, data)\n task_id = r.json()['taskId']\n i = 1\n while True:\n time.sleep(i % 2 + 1)\n status = self.get_task(task_id)['status']\n if status == 'success':\n ip_port = self.get_ip(interface_id)\n return ip_port\n\n\nclass ProxyPool(object):\n\n def __init__(self, proxyswift=ProxySwift(), interval=4):\n self.interval = interval\n self.ps = proxyswift\n self.count = 0\n self.index = 0\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n\n def get(self):\n with open(ip_list_path, 'r', encoding='utf-8') as f:\n self.pool = json.loads(f.read())\n ip = random.choice(self.pool)\n ip = '{0}:{1}'.format(ip['ip'], ip['port'])\n print(ip)\n return ip\n\n def change_ip(self, proxy_server):\n for ip in self.pool:\n if proxy_server == 'http://%(ip)s:%(port)s' % ip:\n self.pool.pop(0)\n self.ps.changes_ip(ip['id'])\n self.pool = self.ps.get_ip()\n time.sleep(1)\n break\n self.refresh_ip()\n\n def refresh_ip(self):\n time.sleep(5)\n self.pool = self.ps.get_ip()\n print(self.pool)\n with open(ip_list_path, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.ps.get_ip()))\n\n\ndef main():\n fire.Fire(ProxyPool)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python3\r\n# coding=utf-8\r\nimport fire\r\nimport json\r\nimport os\r\nimport time\r\nimport requests\r\nimport time\r\nimport hashlib\r\nimport random\r\n\r\nroot_path, file_name = os.path.split(os.path.realpath(__file__))\r\nip_list_path = ''.join([root_path, os.path.sep, 'ip_list.json'])\r\n\r\n\r\nclass ProxySwift(object):\r\n server_id = '1'\r\n\r\n def requerst_get(self, url, data, *p, **kwargs):\r\n SecretKey = 
'3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j'\r\n\r\n PartnerID = '2017061217350058'\r\n TimeStamp = int(time.time())\r\n\r\n\r\n source_data = {\r\n 'partner_id': PartnerID,\r\n 'timestamp': TimeStamp\r\n }\r\n\r\n source_data.update(data)\r\n\r\n tmp_data = [i for i in source_data.items()]\r\n tmp_data = sorted(tmp_data, key=lambda i: i[0])\r\n\r\n url_list = ['{}{}'.format(*i) for i in tmp_data]\r\n # url_list.reverse()\r\n # sign = ''.join(url_list)\r\n # sign = ''.join(sorted(sign))\r\n\r\n sign = ''.join(url_list)\r\n # sign = ''.join(sorted(sign))\r\n\r\n data = sign + SecretKey\r\n md_5 = hashlib.md5()\r\n md_5.update(data.encode(\"utf-8\"))\r\n sign = md_5.hexdigest()\r\n source_data.update({'sign': sign})\r\n return requests.get(url, params=source_data, verify=False, *p, **kwargs)\r\n\r\n def get_ip(self, interface_id='', pool_id=''):\r\n url = 'https://api.proxyswift.com/ip/get'\r\n data = {\r\n 'server_id': self.server_id,\r\n 'pool_id': pool_id,\r\n 'interface_id': interface_id,\r\n }\r\n r = self.requerst_get(url, data)\r\n response = r.json()\r\n return response\r\n\r\n def get_task(self, task_id):\r\n url = 'https://api.proxyswift.com/task/get'\r\n data = {'task_id': task_id}\r\n r = self.requerst_get(url, data)\r\n\r\n return r.json()\r\n\r\n def changes_ip(self, interface_id, filter=24):\r\n url = 'https://api.proxyswift.com/ip/change'\r\n data = {\r\n 'server_id': self.server_id,\r\n 'interface_id': interface_id,\r\n 'filter': filter,\r\n }\r\n\r\n r = self.requerst_get(url, data)\r\n task_id = r.json()['taskId']\r\n #status = self(task_id)['status']\r\n\r\n i = 1\r\n while True:\r\n time.sleep(i%2+1)\r\n status = self.get_task(task_id)['status']\r\n if status == 'success':\r\n ip_port = self.get_ip(interface_id)\r\n return ip_port\r\n\r\n\r\nclass ProxyPool(object):\r\n def __init__(self, proxyswift=ProxySwift(), interval=4):\r\n\r\n self.interval = interval\r\n self.ps = proxyswift\r\n self.count = 0\r\n self.index = 0\r\n\r\n with open(ip_list_path, 'r', encoding='utf-8') as f:\r\n self.pool = json.loads(f.read())\r\n\r\n def get(self):\r\n # 从 pool中随机取一个ip\r\n with open(ip_list_path, 'r', encoding='utf-8') as f:\r\n self.pool = json.loads(f.read())\r\n ip = random.choice(self.pool)\r\n ip = \"{0}:{1}\".format(ip['ip'], ip['port'])\r\n print(ip)\r\n return ip\r\n\r\n def change_ip(self, proxy_server):\r\n for ip in self.pool:\r\n if proxy_server == \"http://%(ip)s:%(port)s\" % ip:\r\n self.pool.pop(0)\r\n self.ps.changes_ip(ip['id'])\r\n self.pool = self.ps.get_ip()\r\n time.sleep(1)\r\n break\r\n self.refresh_ip()\r\n\r\n def refresh_ip(self):\r\n time.sleep(5)\r\n self.pool = self.ps.get_ip()\r\n print(self.pool)\r\n # os.environ['ip_list'] = json.dumps(self.ps.get_ip())\r\n with open(ip_list_path, 'w', encoding='utf-8') as f:\r\n f.write(json.dumps(self.ps.get_ip()))\r\n\r\n\r\ndef main():\r\n fire.Fire(ProxyPool)\r\n\r\nif __name__ == '__main__':\r\n main()", "step-ids": [ 9, 10, 13, 14, 16 ] }
[ 9, 10, 13, 14, 16 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> model.fit(data_train, label_train) <|reserved_special_token_0|> print(model.score(data_test, label_test)) print(accuracy_score(label_test, predictions)) print(accuracy_score(label_test, predictions, normalize=False)) print(metrics.confusion_matrix(predictions, label_test)) print(metrics.classification_report(label_test, predictions)) <|reserved_special_token_1|> <|reserved_special_token_0|> data = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]] labels = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] data_train, data_test, label_train, label_test = train_test_split(data, labels, test_size=0.5, random_state=7) model = SVC(kernel='linear') model.fit(data_train, label_train) predictions = model.predict(data_test) print(model.score(data_test, label_test)) print(accuracy_score(label_test, predictions)) print(accuracy_score(label_test, predictions, normalize=False)) print(metrics.confusion_matrix(predictions, label_test)) print(metrics.classification_report(label_test, predictions)) <|reserved_special_token_1|> <|reserved_special_token_0|> import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.svm import SVC import sklearn.metrics as metrics from sklearn.metrics import accuracy_score data = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]] labels = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] data_train, data_test, label_train, label_test = train_test_split(data, labels, test_size=0.5, random_state=7) model = SVC(kernel='linear') model.fit(data_train, label_train) predictions = model.predict(data_test) print(model.score(data_test, label_test)) print(accuracy_score(label_test, predictions)) print(accuracy_score(label_test, predictions, normalize=False)) print(metrics.confusion_matrix(predictions, label_test)) print(metrics.classification_report(label_test, predictions)) <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created for COMP5121 Lab on 2017 JUN 24 @author: King """ import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.svm import SVC import sklearn.metrics as metrics from sklearn.metrics import accuracy_score data = [[0],[1],[2],[3],[4], [5],[6],[7],[8],[9]] # input dataframe samples labels = [0,0,0,0,0, 1,1,1,1,1] # the function we're training is " >4 " data_train, data_test, label_train, label_test = train_test_split(data, labels, test_size=0.5, random_state=7) model = SVC(kernel='linear') model.fit(data_train, label_train) predictions = model.predict(data_test) print(model.score(data_test, label_test)) print(accuracy_score(label_test, predictions)) print(accuracy_score(label_test, predictions, normalize=False)) print(metrics.confusion_matrix(predictions, label_test)) print(metrics.classification_report(label_test, predictions))
flexible
{ "blob_id": "33365d5ce5d2a7d28b76a7897de25e1f35d28855", "index": 6269, "step-1": "<mask token>\n", "step-2": "<mask token>\nmodel.fit(data_train, label_train)\n<mask token>\nprint(model.score(data_test, label_test))\nprint(accuracy_score(label_test, predictions))\nprint(accuracy_score(label_test, predictions, normalize=False))\nprint(metrics.confusion_matrix(predictions, label_test))\nprint(metrics.classification_report(label_test, predictions))\n", "step-3": "<mask token>\ndata = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]\nlabels = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\ndata_train, data_test, label_train, label_test = train_test_split(data,\n labels, test_size=0.5, random_state=7)\nmodel = SVC(kernel='linear')\nmodel.fit(data_train, label_train)\npredictions = model.predict(data_test)\nprint(model.score(data_test, label_test))\nprint(accuracy_score(label_test, predictions))\nprint(accuracy_score(label_test, predictions, normalize=False))\nprint(metrics.confusion_matrix(predictions, label_test))\nprint(metrics.classification_report(label_test, predictions))\n", "step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nimport sklearn.metrics as metrics\nfrom sklearn.metrics import accuracy_score\ndata = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]\nlabels = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\ndata_train, data_test, label_train, label_test = train_test_split(data,\n labels, test_size=0.5, random_state=7)\nmodel = SVC(kernel='linear')\nmodel.fit(data_train, label_train)\npredictions = model.predict(data_test)\nprint(model.score(data_test, label_test))\nprint(accuracy_score(label_test, predictions))\nprint(accuracy_score(label_test, predictions, normalize=False))\nprint(metrics.confusion_matrix(predictions, label_test))\nprint(metrics.classification_report(label_test, predictions))\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated for COMP5121 Lab on 2017 JUN 24\n\n@author: King\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nimport sklearn.metrics as metrics\nfrom sklearn.metrics import accuracy_score\n\ndata = [[0],[1],[2],[3],[4], [5],[6],[7],[8],[9]] # input dataframe samples\nlabels = [0,0,0,0,0, 1,1,1,1,1] # the function we're training is \" >4 \"\n\ndata_train, data_test, label_train, label_test = train_test_split(data, labels, test_size=0.5, random_state=7)\n\nmodel = SVC(kernel='linear') \n\nmodel.fit(data_train, label_train) \n \n\npredictions = model.predict(data_test)\n\nprint(model.score(data_test, label_test))\n\nprint(accuracy_score(label_test, predictions))\nprint(accuracy_score(label_test, predictions, normalize=False))\n\nprint(metrics.confusion_matrix(predictions, label_test))\nprint(metrics.classification_report(label_test, predictions))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from tornado import gen
import rethinkdb as r

from .connection import connection
from .utils import dump_cursor


@gen.coroutine
def get_promotion_keys():
    """Return all promotion keys currently stored in the table."""
    conn = yield connection()
    result = yield r.table('promotion_keys').run(conn)
    result = yield dump_cursor(result)
    return result


@gen.coroutine
def pop_promotion_key(promotion_key):
    """Delete the given key and return its old value, or None if it did not exist."""
    conn = yield connection()
    result = yield r.table('promotion_keys').\
        get(promotion_key).delete(return_changes=True).run(conn)
    if result['changes']:
        return result['changes'][0]['old_val']
    return None


@gen.coroutine
def create_promotion_key(showtime_id):
    """Insert a new key for the given showtime and return its generated id."""
    conn = yield connection()
    data = {
        'showtime_id': showtime_id
    }
    result = yield r.table('promotion_keys').insert(data).run(conn)
    promotion_key = result['generated_keys'][0]
    return promotion_key
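
# Illustrative call site (hypothetical handler), composing these coroutines
# in Tornado's gen.coroutine/yield style:
#
#     @gen.coroutine
#     def promote(showtime_id):
#         key = yield create_promotion_key(showtime_id)
#         old = yield pop_promotion_key(key)   # the deleted row, or None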
normal
{ "blob_id": "66cdfdfa797c9991e5cb169c4b94a1e7041ca458", "index": 4772, "step-1": "<mask token>\n\n\n@gen.coroutine\ndef pop_promotion_key(promotion_key):\n conn = yield connection()\n result = yield r.table('promotion_keys').get(promotion_key).delete(\n return_changes=True).run(conn)\n if result['changes']:\n return result['changes'][0]['old_val']\n return None\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@gen.coroutine\ndef get_promotion_keys():\n conn = yield connection()\n result = yield r.table('promotion_keys').run(conn)\n result = yield dump_cursor(result)\n return result\n\n\n@gen.coroutine\ndef pop_promotion_key(promotion_key):\n conn = yield connection()\n result = yield r.table('promotion_keys').get(promotion_key).delete(\n return_changes=True).run(conn)\n if result['changes']:\n return result['changes'][0]['old_val']\n return None\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@gen.coroutine\ndef get_promotion_keys():\n conn = yield connection()\n result = yield r.table('promotion_keys').run(conn)\n result = yield dump_cursor(result)\n return result\n\n\n@gen.coroutine\ndef pop_promotion_key(promotion_key):\n conn = yield connection()\n result = yield r.table('promotion_keys').get(promotion_key).delete(\n return_changes=True).run(conn)\n if result['changes']:\n return result['changes'][0]['old_val']\n return None\n\n\n@gen.coroutine\ndef create_promotion_key(showtime_id):\n conn = yield connection()\n data = {'showtime_id': showtime_id}\n result = yield r.table('promotion_keys').insert(data).run(conn)\n promotion_key = result['generated_keys'][0]\n return promotion_key\n", "step-4": "from tornado import gen\nimport rethinkdb as r\nfrom .connection import connection\nfrom .utils import dump_cursor\n\n\n@gen.coroutine\ndef get_promotion_keys():\n conn = yield connection()\n result = yield r.table('promotion_keys').run(conn)\n result = yield dump_cursor(result)\n return result\n\n\n@gen.coroutine\ndef pop_promotion_key(promotion_key):\n conn = yield connection()\n result = yield r.table('promotion_keys').get(promotion_key).delete(\n return_changes=True).run(conn)\n if result['changes']:\n return result['changes'][0]['old_val']\n return None\n\n\n@gen.coroutine\ndef create_promotion_key(showtime_id):\n conn = yield connection()\n data = {'showtime_id': showtime_id}\n result = yield r.table('promotion_keys').insert(data).run(conn)\n promotion_key = result['generated_keys'][0]\n return promotion_key\n", "step-5": "from tornado import gen\nimport rethinkdb as r\n\nfrom .connection import connection\nfrom .utils import dump_cursor\n\n\n@gen.coroutine\ndef get_promotion_keys():\n conn = yield connection()\n result = yield r.table('promotion_keys').run(conn)\n result = yield dump_cursor(result)\n return result\n\n\n@gen.coroutine\ndef pop_promotion_key(promotion_key):\n conn = yield connection()\n result = yield r.table('promotion_keys').\\\n get(promotion_key).delete(return_changes=True).run(conn)\n if result['changes']:\n return result['changes'][0]['old_val']\n return None\n\n\n@gen.coroutine\ndef create_promotion_key(showtime_id):\n conn = yield connection()\n data = {\n 'showtime_id': showtime_id\n }\n result = yield r.table('promotion_keys').insert(data).run(conn)\n promotion_key = result['generated_keys'][0]\n return promotion_key\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# put your python code here
a = int(input())
b = int(input())

# least common multiple: scan upward from 1 until a value
# divisible by both a and b is found
i = 1
if a == b:
    print(a)
else:
    while True:
        if i % a == 0 and i % b == 0:
            print(i)
            break
        else:
            i += 1
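
# Cross-check of the loop above using the closed form lcm(a, b) = a*b // gcd(a, b)
# (math.gcd is in the standard library from Python 3.5):
#
#     import math
#     print(a * b // math.gcd(a, b))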
normal
{ "blob_id": "af5ebdcd818fdf9c607240733b7b5dbb793cf55e", "index": 7328, "step-1": "<mask token>\n", "step-2": "<mask token>\nif a == b:\n print(a)\nelse:\n while True:\n if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:\n print(i)\n break\n else:\n i += 1\n", "step-3": "a = int(input())\nb = int(input())\ni = 1\nif a == b:\n print(a)\nelse:\n while True:\n if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:\n print(i)\n break\n else:\n i += 1\n", "step-4": "# put your python code here\na = int(input())\nb = int(input())\n\n# and\ni = 1\nif a == b:\n print(a)\nelse:\n while True:\n if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:\n print(i)\n break\n else:\n i += 1\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import pytest from components import models pytestmark = pytest.mark.django_db def test_app_models(): assert models.ComponentsApp.allowed_subpage_models() == [ models.ComponentsApp, models.BannerComponent, ] def test_app_required_translatable_fields(): assert models.ComponentsApp.get_required_translatable_fields() == [] @pytest.mark.django_db def test_set_slug(en_locale): instance = models.ComponentsApp.objects.create( title_en_gb='the app', depth=2, path='/thing', ) assert instance.slug == models.ComponentsApp.slug_identity
normal
{ "blob_id": "b1622aa65422fcb69a16ad48a26fd9ed05b10382", "index": 8882, "step-1": "<mask token>\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\n<mask token>\n\n\n@pytest.mark.django_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n", "step-2": "<mask token>\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\n@pytest.mark.django_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n", "step-3": "<mask token>\npytestmark = pytest.mark.django_db\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\n@pytest.mark.django_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n", "step-4": "import pytest\nfrom components import models\npytestmark = pytest.mark.django_db\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\n@pytest.mark.django_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n", "step-5": "import pytest\n\nfrom components import models\n\npytestmark = pytest.mark.django_db\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [\n models.ComponentsApp,\n models.BannerComponent,\n ]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\n@pytest.mark.django_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(\n title_en_gb='the app',\n depth=2,\n path='/thing',\n )\n\n assert instance.slug == models.ComponentsApp.slug_identity\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# (1) Obtain your values here (https://core.telegram.org/api/obtaining_api_id)
api_id = 0  # placeholder; leading-zero literals such as 000000 are a SyntaxError in Python 3
api_hash = '00000000000000000000000'

phone = '+000000000000'
username = 'theone'

project_id = 0  # placeholder
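
# These settings match what a Telegram client library expects; for example,
# with Telethon (assumed here, not shown in this file):
#
#     from telethon import TelegramClient
#     client = TelegramClient(username, api_id, api_hash)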
normal
{ "blob_id": "a5646a5d42dbf6e70e9d18f28513ee2df68a28b1", "index": 6886, "step-1": "<mask token>\n", "step-2": "api_id = 0\napi_hash = '00000000000000000000000'\nphone = '+000000000000'\nusername = 'theone'\nproject_id = 0\n", "step-3": "# (1) Obtain your values here (https://core.telegram.org/api/obtaining_api_id)\napi_id = 000000\napi_hash = '00000000000000000000000'\n\nphone = '+000000000000'\nusername = 'theone'\n\nproject_id = 000000000\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print(numbers) print(numbers[1]) print(numbers[-1]) <|reserved_special_token_0|> print(numbers) del numbers[1] print(numbers) numbers.append(17) print(numbers) numbers.insert(2, 5) print(numbers) numbers.sort() print(numbers) <|reserved_special_token_1|> numbers = [3, 4, 6, 7] print(numbers) print(numbers[1]) print(numbers[-1]) numbers[1] = 3 print(numbers) del numbers[1] print(numbers) numbers.append(17) print(numbers) numbers.insert(2, 5) print(numbers) numbers.sort() print(numbers) <|reserved_special_token_1|> numbers = [3,4,6,7] # 0 1 2 3 print(numbers) print(numbers[1]) print(numbers[-1]) numbers[1] = 3 print(numbers) del numbers[1] print(numbers) numbers.append(17) print(numbers) numbers.insert(2,5) print(numbers) numbers.sort() print(numbers)
flexible
{ "blob_id": "34d3eebf6ccb19f891ccbb16db47cd6412f1cb0f", "index": 1155, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(numbers)\nprint(numbers[1])\nprint(numbers[-1])\n<mask token>\nprint(numbers)\ndel numbers[1]\nprint(numbers)\nnumbers.append(17)\nprint(numbers)\nnumbers.insert(2, 5)\nprint(numbers)\nnumbers.sort()\nprint(numbers)\n", "step-3": "numbers = [3, 4, 6, 7]\nprint(numbers)\nprint(numbers[1])\nprint(numbers[-1])\nnumbers[1] = 3\nprint(numbers)\ndel numbers[1]\nprint(numbers)\nnumbers.append(17)\nprint(numbers)\nnumbers.insert(2, 5)\nprint(numbers)\nnumbers.sort()\nprint(numbers)\n", "step-4": "numbers = [3,4,6,7]\n# 0 1 2 3\nprint(numbers)\nprint(numbers[1])\nprint(numbers[-1])\nnumbers[1] = 3\nprint(numbers)\ndel numbers[1]\nprint(numbers)\nnumbers.append(17)\nprint(numbers)\nnumbers.insert(2,5)\nprint(numbers)\nnumbers.sort()\nprint(numbers)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python """ Update the expected test outputs and inputs for rsmsummarize and rsmcompare tests. This script assumes that you have already run `nose2 -s tests` and ran the entire test suite. By doing so, the output has been generated under the given outputs directory. And that is what will be used to generate the new expected output under `tests/data/experiments`. ############################################################################################# # IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. # ############################################################################################# The script works as follows. For each experiment test: - The script locates the output under the updated outputs directory. - New and changed files in this directory are copied over to the expected test output location. - Old files in the expected test output are deleted. - Files that are already in the expected test output and have not changed are left alone. - Directories that are missing or empty under the updated test outputs are shown. - For rsmsummarize and rsmcompare tests, the same logic is also applied to input data. It is assumed that the input experiments are copies of the experiments from existing tests. Note: If running this script results in changes to the inputs for rsmcompare or rsmsummarize tests, you will need to first re-run the tests for those two tools and then, potentially, run this script again to update their test outputs. See `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_ for a further explanation of this process. The script prints a log detailing the changes made for each experiment test. :author: Nitin Madnani :author: Anastassia Loukina :author: Jeremy Biggs :organization: ETS """ import argparse import re import sys from pathlib import Path from rsmtool.test_utils import FileUpdater def main(): # noqa: D103 # set up an argument parser parser = argparse.ArgumentParser(prog="update_test_files.py") parser.add_argument( "--tests", dest="tests_dir", required=True, help="The path to the existing RSMTool tests directory", ) parser.add_argument( "--outputs", dest="outputs_dir", required=True, help="The path to the directory containing the updated test " "outputs (usually `test_outputs`)", ) # parse given command line arguments args = parser.parse_args() # print out a reminder that the user should have run the test suite run_test_suite = input("Have you already run the whole test suite? (y/n): ") if run_test_suite == "n": print("Please run the whole test suite using `nose2 -s tests` before running this script.") sys.exit(0) elif run_test_suite != "y": print("Invalid answer. Exiting.") sys.exit(1) else: print() # iterate over the given tests directory and find all files named # `test_experiment_*.py` and get their suffixes for use with the # FileUpdater object. suffixes = [ re.sub(r"test_experiment_", "", p.stem) for p in Path("tests").glob("test_experiment_*.py") ] # instantiate a FileUpdater object updater = FileUpdater( test_suffixes=suffixes, tests_directory=args.tests_dir, updated_outputs_directory=args.outputs_dir, ) # run the file updates updater.run() # now print the report from the updated object updater.print_report() if __name__ == "__main__": main()
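
# Example invocation, run from the repository root after the full test suite
# has completed (directory names are the usual defaults, assumed here):
#
#     python update_test_files.py --tests tests --outputs test_outputs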
normal
{ "blob_id": "7e20c61fa30ea93e69a2479e70449638eb52b7bb", "index": 2964, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\nfrom rsmtool.test_utils import FileUpdater\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='update_test_files.py')\n parser.add_argument('--tests', dest='tests_dir', required=True, help=\n 'The path to the existing RSMTool tests directory')\n parser.add_argument('--outputs', dest='outputs_dir', required=True,\n help=\n 'The path to the directory containing the updated test outputs (usually `test_outputs`)'\n )\n args = parser.parse_args()\n run_test_suite = input('Have you already run the whole test suite? (y/n): '\n )\n if run_test_suite == 'n':\n print(\n 'Please run the whole test suite using `nose2 -s tests` before running this script.'\n )\n sys.exit(0)\n elif run_test_suite != 'y':\n print('Invalid answer. 
Exiting.')\n sys.exit(1)\n else:\n print()\n suffixes = [re.sub('test_experiment_', '', p.stem) for p in Path(\n 'tests').glob('test_experiment_*.py')]\n updater = FileUpdater(test_suffixes=suffixes, tests_directory=args.\n tests_dir, updated_outputs_directory=args.outputs_dir)\n updater.run()\n updater.print_report()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python\n\"\"\"\nUpdate the expected test outputs and inputs for rsmsummarize and rsmcompare tests.\n\nThis script assumes that you have already run `nose2 -s tests` and ran the entire\ntest suite. By doing so, the output has been generated under the given outputs\ndirectory. And that is what will be used to generate the new expected output\nunder `tests/data/experiments`.\n\n#############################################################################################\n# IMPORTANT: DO NOT RUN THIS SCRIPT BEFORE RUNNING THE TEST SUITE OR IT WILL BE DISASTROUS. #\n#############################################################################################\n\nThe script works as follows. For each experiment test:\n- The script locates the output under the updated outputs directory.\n- New and changed files in this directory are copied over to the expected test\n output location.\n- Old files in the expected test output are deleted.\n- Files that are already in the expected test output and have not changed are\n left alone.\n- Directories that are missing or empty under the updated test outputs are shown.\n- For rsmsummarize and rsmcompare tests, the same logic is also applied to input\n data. It is assumed that the input experiments are copies of the experiments\n from existing tests.\n\nNote: If running this script results in changes to the inputs for rsmcompare\nor rsmsummarize tests, you will need to first re-run the tests for those two\ntools and then, potentially, run this script again to update their test outputs.\n\nSee `documentation <https://rsmtool.readthedocs.io/en/main/contributing.html#writing-new-functional-tests>`_\nfor a further explanation of this process.\n\nThe script prints a log detailing the changes made for each experiment test.\n\n:author: Nitin Madnani\n:author: Anastassia Loukina\n:author: Jeremy Biggs\n\n:organization: ETS\n\"\"\"\n\nimport argparse\nimport re\nimport sys\nfrom pathlib import Path\n\nfrom rsmtool.test_utils import FileUpdater\n\n\ndef main(): # noqa: D103\n # set up an argument parser\n parser = argparse.ArgumentParser(prog=\"update_test_files.py\")\n parser.add_argument(\n \"--tests\",\n dest=\"tests_dir\",\n required=True,\n help=\"The path to the existing RSMTool tests directory\",\n )\n parser.add_argument(\n \"--outputs\",\n dest=\"outputs_dir\",\n required=True,\n help=\"The path to the directory containing the updated test \"\n \"outputs (usually `test_outputs`)\",\n )\n\n # parse given command line arguments\n args = parser.parse_args()\n\n # print out a reminder that the user should have run the test suite\n run_test_suite = input(\"Have you already run the whole test suite? (y/n): \")\n if run_test_suite == \"n\":\n print(\"Please run the whole test suite using `nose2 -s tests` before running this script.\")\n sys.exit(0)\n elif run_test_suite != \"y\":\n print(\"Invalid answer. 
Exiting.\")\n sys.exit(1)\n else:\n print()\n\n # iterate over the given tests directory and find all files named\n # `test_experiment_*.py` and get their suffixes for use with the\n # FileUpdater object.\n suffixes = [\n re.sub(r\"test_experiment_\", \"\", p.stem) for p in Path(\"tests\").glob(\"test_experiment_*.py\")\n ]\n\n # instantiate a FileUpdater object\n updater = FileUpdater(\n test_suffixes=suffixes,\n tests_directory=args.tests_dir,\n updated_outputs_directory=args.outputs_dir,\n )\n\n # run the file updates\n updater.run()\n\n # now print the report from the updated object\n updater.print_report()\n\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from __future__ import print_function, absolute_import, division

import os
import h5py
import glob
import copy
import numpy as np
from tqdm import tqdm

# from utils.pose import draw_skeleton
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import poseutils.camera_utils as cameras
from poseutils.view import draw_skeleton
from poseutils.props import get_body_centered_axes
from poseutils.transform import normalize_skeleton
from poseutils.transform import normalize_zscore

parents = [-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 8, 10, 11, 8, 13, 14]
joints_left = [4, 5, 6, 10, 11, 12]
joints_right = [1, 2, 3, 13, 14, 15]

skeleton_3DPW_joints_group = [[2, 3], [5, 6], [1, 4], [0, 7], [12, 13], [9, 10], [8, 11]]

NAMES_3DPW = ['']*14
NAMES_3DPW[0] = 'Hip'
NAMES_3DPW[1] = 'RHip'
NAMES_3DPW[2] = 'RKnee'
NAMES_3DPW[3] = 'RAnkle'
NAMES_3DPW[4] = 'LHip'
NAMES_3DPW[5] = 'LKnee'
NAMES_3DPW[6] = 'LAnkle'
# NAMES_3DPW[7] = 'Spine2'
NAMES_3DPW[7] = 'Neck'
# NAMES_3DPW[8] = 'Head'
NAMES_3DPW[8] = 'LUpperArm'
NAMES_3DPW[9] = 'LElbow'
NAMES_3DPW[10] = 'LWrist'
NAMES_3DPW[11] = 'RUpperArm'
NAMES_3DPW[12] = 'RElbow'
NAMES_3DPW[13] = 'RWrist'

# Human3.6m IDs for training and testing
TRAIN_SUBJECTS = ['S0']
TEST_SUBJECTS = ['S0']

class TDPWDataset(object):

    def __init__(self, path, center_2d=False, load_metrics=None, skel_norm=False):
        # TODO: Update the fps here if needed
        super(TDPWDataset, self).__init__()

        # TODO: Update camera later if needed
        self.cameras = None

        self._data_train = { "2d": np.zeros((0, 14, 2), dtype=np.float32), "3d": np.zeros((0, 14, 3), dtype=np.float32), "axes": [] }
        self._data_valid = { "2d": np.zeros((0, 14, 2), dtype=np.float32), "3d": np.zeros((0, 14, 3), dtype=np.float32), "axes": [] }

        self.mean_2d = 0.0
        self.std_2d = 0.0
        self.mean_3d = 0.0
        self.std_3d = 0.0

        self.center_2d = center_2d

        self.skel_norm = skel_norm

        self.cameras = []

        self.load_data(path)

    def load_data(self, path, load_metrics=None):
        filename = os.path.splitext(os.path.basename(path))[0]

        indices_to_select = [0, 2, 5, 8, 1, 4, 7, 12, 16, 18, 20, 17, 19, 21]

        data = np.load(path, allow_pickle=True, encoding='latin1')['data'].item()

        data_train = data['train']
        data_valid = data['test']

        self._data_train['2d'] = data_train["combined_2d"][:, indices_to_select, :]
        self._data_train['3d'] = data_train["combined_3d_cam"][:, indices_to_select, :]*1000

        self._data_valid['2d'] = data_valid["combined_2d"][:, indices_to_select, :]
        self._data_valid['3d'] = data_valid["combined_3d_cam"][:, indices_to_select, :]*1000

        self._data_train['3d'] -= self._data_train['3d'][:, :1, :]
        self._data_valid['3d'] -= self._data_valid['3d'][:, :1, :]

        if self.center_2d:
            self._data_train['2d'] -= self._data_train['2d'][:, :1, :]
            self._data_valid['2d'] -= self._data_valid['2d'][:, :1, :]

        _, _, _, self._data_train['axes'] = get_body_centered_axes(self._data_train['3d'])
        _, _, _, self._data_valid['axes'] = get_body_centered_axes(self._data_valid['3d'])

        if self.skel_norm:
            self._data_train['2d'] = normalize_skeleton(self._data_train['2d'])
            self._data_valid['2d'] = normalize_skeleton(self._data_valid['2d'])

        # self.plot_random()

        self.mean_3d = np.mean(self._data_train['3d'], axis=0)
        self.std_3d = np.std(self._data_train['3d'], axis=0)

        self._data_train['3d'] = normalize_zscore(self._data_train['3d'], self.mean_3d, self.std_3d, skip_root=True)
        self._data_valid['3d'] = normalize_zscore(self._data_valid['3d'], self.mean_3d, self.std_3d, skip_root=True)

        if not self.skel_norm:
            self.mean_2d = np.mean(self._data_train['2d'], axis=0)
            self.std_2d = np.std(self._data_train['2d'], axis=0)
            self._data_train['2d'] = normalize_zscore(self._data_train['2d'], self.mean_2d, self.std_2d, skip_root=self.center_2d)
            self._data_valid['2d'] = normalize_zscore(self._data_valid['2d'], self.mean_2d, self.std_2d, skip_root=self.center_2d)

    def define_actions(self, action=None):
        all_actions = ["N"]

        if action is None:
            return all_actions

        if action not in all_actions:
            raise ValueError("Undefined action: {}".format(action))

        return [action]

    def get_2d_valid(self):
        return [self._data_valid['2d'].reshape((-1, 14, 2))]

    def get_3d_valid(self):
        return [self._data_valid['3d'].reshape((-1, 14, 3))]

    def get_2d_train(self):
        return [self._data_train['2d'].reshape((-1, 14, 2))]

    def get_3d_train(self):
        return [self._data_train['3d'].reshape((-1, 14, 3))]

    def get_axes_train(self):
        return [self._data_train['axes'][:, :, :2]]

    def get_axes_valid(self):
        return [self._data_valid['axes'][:, :, :2]]

    def get_joints_group(self):
        return skeleton_3DPW_joints_group

    def plot_random(self):
        idx = np.random.randint(0, high=self._data_train['3d'].shape[0])

        fig = plt.figure(figsize=(12, 6))
        ax = fig.add_subplot(121, projection='3d')
        bx = fig.add_subplot(122)

        draw_skeleton(self._data_train['3d'][idx, :, :]/1000, ax)
        draw_skeleton(self._data_train['2d'][idx, :, :], bx)

        ax.set_xlabel("X")
        ax.set_ylabel("Y")
        ax.set_zlabel("Z")

        ax.set_xlim((-1, 1))
        ax.set_ylim((-1, 1))
        ax.set_zlim((-1, 1))

        bx.set_xlim((-960, 960))
        bx.set_ylim((960, -960))

        plt.show()
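
# Illustrative usage of the loader above (the .npz path is hypothetical and
# depends on how the 3DPW archive was preprocessed):
#
#     dataset = TDPWDataset('data/3dpw.npz', center_2d=True)
#     train_2d = dataset.get_2d_train()[0]   # (N, 14, 2), z-score normalized
#     train_3d = dataset.get_3d_train()[0]   # (N, 14, 3), root-relative, z-scored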
normal
{ "blob_id": "cf6dffb28e37003212d3e3402dee58a57a7d9869", "index": 5192, "step-1": "<mask token>\n\n\nclass TDPWDataset(object):\n\n def __init__(self, path, center_2d=False, load_metrics=None, skel_norm=\n False):\n super(TDPWDataset, self).__init__()\n self.cameras = None\n self._data_train = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self._data_valid = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self.mean_2d = 0.0\n self.std_2d = 0.0\n self.mean_3d = 0.0\n self.std_3d = 0.0\n self.center_2d = center_2d\n self.skel_norm = skel_norm\n self.cameras = []\n self.load_data(path)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_2d_train(self):\n return [self._data_train['2d'].reshape((-1, 14, 2))]\n\n def get_3d_train(self):\n return [self._data_train['3d'].reshape((-1, 14, 3))]\n\n def get_axes_train(self):\n return [self._data_train['axes'][:, :, :2]]\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TDPWDataset(object):\n\n def __init__(self, path, center_2d=False, load_metrics=None, skel_norm=\n False):\n super(TDPWDataset, self).__init__()\n self.cameras = None\n self._data_train = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self._data_valid = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self.mean_2d = 0.0\n self.std_2d = 0.0\n self.mean_3d = 0.0\n self.std_3d = 0.0\n self.center_2d = center_2d\n self.skel_norm = skel_norm\n self.cameras = []\n self.load_data(path)\n <mask token>\n <mask token>\n <mask token>\n\n def get_3d_valid(self):\n return [self._data_valid['3d'].reshape((-1, 14, 3))]\n\n def get_2d_train(self):\n return [self._data_train['2d'].reshape((-1, 14, 2))]\n\n def get_3d_train(self):\n return [self._data_train['3d'].reshape((-1, 14, 3))]\n\n def get_axes_train(self):\n return [self._data_train['axes'][:, :, :2]]\n\n def get_axes_valid(self):\n return [self._data_valid['axes'][:, :, :2]]\n <mask token>\n\n def plot_random(self):\n idx = np.random.randint(0, high=self._data_train['3d'].shape[0])\n fig = plt.figure(figsize=(12, 6))\n ax = fig.add_subplot(121, projection='3d')\n bx = fig.add_subplot(122)\n draw_skeleton(self._data_train['3d'][idx, :, :] / 1000, ax)\n draw_skeleton(self._data_train['2d'][idx, :, :], bx)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_xlim((-1, 1))\n ax.set_ylim((-1, 1))\n ax.set_zlim((-1, 1))\n bx.set_xlim((-960, 960))\n bx.set_ylim((960, -960))\n plt.show()\n", "step-3": "<mask token>\n\n\nclass TDPWDataset(object):\n\n def __init__(self, path, center_2d=False, load_metrics=None, skel_norm=\n False):\n super(TDPWDataset, self).__init__()\n self.cameras = None\n self._data_train = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self._data_valid = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self.mean_2d = 0.0\n self.std_2d = 0.0\n self.mean_3d = 0.0\n self.std_3d = 0.0\n self.center_2d = center_2d\n self.skel_norm = skel_norm\n self.cameras = []\n self.load_data(path)\n\n def load_data(self, path, load_metrics=None):\n filename = os.path.splitext(os.path.basename(path))[0]\n indices_to_select = [0, 2, 5, 8, 1, 4, 7, 12, 16, 18, 20, 17, 19, 21]\n data = np.load(path, allow_pickle=True, 
encoding='latin1')['data'\n ].item()\n data_train = data['train']\n data_valid = data['test']\n self._data_train['2d'] = data_train['combined_2d'][:,\n indices_to_select, :]\n self._data_train['3d'] = data_train['combined_3d_cam'][:,\n indices_to_select, :] * 1000\n self._data_valid['2d'] = data_valid['combined_2d'][:,\n indices_to_select, :]\n self._data_valid['3d'] = data_valid['combined_3d_cam'][:,\n indices_to_select, :] * 1000\n self._data_train['3d'] -= self._data_train['3d'][:, :1, :]\n self._data_valid['3d'] -= self._data_valid['3d'][:, :1, :]\n if self.center_2d:\n self._data_train['2d'] -= self._data_train['2d'][:, :1, :]\n self._data_valid['2d'] -= self._data_valid['2d'][:, :1, :]\n _, _, _, self._data_train['axes'] = get_body_centered_axes(self.\n _data_train['3d'])\n _, _, _, self._data_valid['axes'] = get_body_centered_axes(self.\n _data_valid['3d'])\n if self.skel_norm:\n self._data_train['2d'] = normalize_skeleton(self._data_train['2d'])\n self._data_valid['2d'] = normalize_skeleton(self._data_valid['2d'])\n self.mean_3d = np.mean(self._data_train['3d'], axis=0)\n self.std_3d = np.std(self._data_train['3d'], axis=0)\n self._data_train['3d'] = normalize_zscore(self._data_train['3d'],\n self.mean_3d, self.std_3d, skip_root=True)\n self._data_valid['3d'] = normalize_zscore(self._data_valid['3d'],\n self.mean_3d, self.std_3d, skip_root=True)\n if not self.skel_norm:\n self.mean_2d = np.mean(self._data_train['2d'], axis=0)\n self.std_2d = np.std(self._data_train['2d'], axis=0)\n self._data_train['2d'] = normalize_zscore(self._data_train['2d'\n ], self.mean_2d, self.std_2d, skip_root=self.center_2d)\n self._data_valid['2d'] = normalize_zscore(self._data_valid['2d'\n ], self.mean_2d, self.std_2d, skip_root=self.center_2d)\n\n def define_actions(self, action=None):\n all_actions = ['N']\n if action is None:\n return all_actions\n if action not in all_actions:\n raise (ValueError, 'Undefined action: {}'.format(action))\n return [action]\n <mask token>\n\n def get_3d_valid(self):\n return [self._data_valid['3d'].reshape((-1, 14, 3))]\n\n def get_2d_train(self):\n return [self._data_train['2d'].reshape((-1, 14, 2))]\n\n def get_3d_train(self):\n return [self._data_train['3d'].reshape((-1, 14, 3))]\n\n def get_axes_train(self):\n return [self._data_train['axes'][:, :, :2]]\n\n def get_axes_valid(self):\n return [self._data_valid['axes'][:, :, :2]]\n <mask token>\n\n def plot_random(self):\n idx = np.random.randint(0, high=self._data_train['3d'].shape[0])\n fig = plt.figure(figsize=(12, 6))\n ax = fig.add_subplot(121, projection='3d')\n bx = fig.add_subplot(122)\n draw_skeleton(self._data_train['3d'][idx, :, :] / 1000, ax)\n draw_skeleton(self._data_train['2d'][idx, :, :], bx)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_xlim((-1, 1))\n ax.set_ylim((-1, 1))\n ax.set_zlim((-1, 1))\n bx.set_xlim((-960, 960))\n bx.set_ylim((960, -960))\n plt.show()\n", "step-4": "<mask token>\n\n\nclass TDPWDataset(object):\n\n def __init__(self, path, center_2d=False, load_metrics=None, skel_norm=\n False):\n super(TDPWDataset, self).__init__()\n self.cameras = None\n self._data_train = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self._data_valid = {'2d': np.zeros((0, 14, 2), dtype=np.float32),\n '3d': np.zeros((0, 14, 3), dtype=np.float32), 'axes': []}\n self.mean_2d = 0.0\n self.std_2d = 0.0\n self.mean_3d = 0.0\n self.std_3d = 0.0\n self.center_2d = center_2d\n self.skel_norm = skel_norm\n self.cameras = []\n 
self.load_data(path)\n\n def load_data(self, path, load_metrics=None):\n filename = os.path.splitext(os.path.basename(path))[0]\n indices_to_select = [0, 2, 5, 8, 1, 4, 7, 12, 16, 18, 20, 17, 19, 21]\n data = np.load(path, allow_pickle=True, encoding='latin1')['data'\n ].item()\n data_train = data['train']\n data_valid = data['test']\n self._data_train['2d'] = data_train['combined_2d'][:,\n indices_to_select, :]\n self._data_train['3d'] = data_train['combined_3d_cam'][:,\n indices_to_select, :] * 1000\n self._data_valid['2d'] = data_valid['combined_2d'][:,\n indices_to_select, :]\n self._data_valid['3d'] = data_valid['combined_3d_cam'][:,\n indices_to_select, :] * 1000\n self._data_train['3d'] -= self._data_train['3d'][:, :1, :]\n self._data_valid['3d'] -= self._data_valid['3d'][:, :1, :]\n if self.center_2d:\n self._data_train['2d'] -= self._data_train['2d'][:, :1, :]\n self._data_valid['2d'] -= self._data_valid['2d'][:, :1, :]\n _, _, _, self._data_train['axes'] = get_body_centered_axes(self.\n _data_train['3d'])\n _, _, _, self._data_valid['axes'] = get_body_centered_axes(self.\n _data_valid['3d'])\n if self.skel_norm:\n self._data_train['2d'] = normalize_skeleton(self._data_train['2d'])\n self._data_valid['2d'] = normalize_skeleton(self._data_valid['2d'])\n self.mean_3d = np.mean(self._data_train['3d'], axis=0)\n self.std_3d = np.std(self._data_train['3d'], axis=0)\n self._data_train['3d'] = normalize_zscore(self._data_train['3d'],\n self.mean_3d, self.std_3d, skip_root=True)\n self._data_valid['3d'] = normalize_zscore(self._data_valid['3d'],\n self.mean_3d, self.std_3d, skip_root=True)\n if not self.skel_norm:\n self.mean_2d = np.mean(self._data_train['2d'], axis=0)\n self.std_2d = np.std(self._data_train['2d'], axis=0)\n self._data_train['2d'] = normalize_zscore(self._data_train['2d'\n ], self.mean_2d, self.std_2d, skip_root=self.center_2d)\n self._data_valid['2d'] = normalize_zscore(self._data_valid['2d'\n ], self.mean_2d, self.std_2d, skip_root=self.center_2d)\n\n def define_actions(self, action=None):\n all_actions = ['N']\n if action is None:\n return all_actions\n if action not in all_actions:\n raise (ValueError, 'Undefined action: {}'.format(action))\n return [action]\n <mask token>\n\n def get_3d_valid(self):\n return [self._data_valid['3d'].reshape((-1, 14, 3))]\n\n def get_2d_train(self):\n return [self._data_train['2d'].reshape((-1, 14, 2))]\n\n def get_3d_train(self):\n return [self._data_train['3d'].reshape((-1, 14, 3))]\n\n def get_axes_train(self):\n return [self._data_train['axes'][:, :, :2]]\n\n def get_axes_valid(self):\n return [self._data_valid['axes'][:, :, :2]]\n\n def get_joints_group(self):\n return skeleton_3DPW_joints_group\n\n def plot_random(self):\n idx = np.random.randint(0, high=self._data_train['3d'].shape[0])\n fig = plt.figure(figsize=(12, 6))\n ax = fig.add_subplot(121, projection='3d')\n bx = fig.add_subplot(122)\n draw_skeleton(self._data_train['3d'][idx, :, :] / 1000, ax)\n draw_skeleton(self._data_train['2d'][idx, :, :], bx)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_xlim((-1, 1))\n ax.set_ylim((-1, 1))\n ax.set_zlim((-1, 1))\n bx.set_xlim((-960, 960))\n bx.set_ylim((960, -960))\n plt.show()\n", "step-5": "from __future__ import print_function, absolute_import, division\n\nimport os\nimport h5py\nimport glob\nimport copy\nimport numpy as np\nfrom tqdm import tqdm\n\n# from utils.pose import draw_skeleton\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport poseutils.camera_utils as 
cameras\nfrom poseutils.view import draw_skeleton\nfrom poseutils.props import get_body_centered_axes\nfrom poseutils.transform import normalize_skeleton\nfrom poseutils.transform import normalize_zscore\n\nparents = [-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 8, 10, 11, 8, 13, 14]\njoints_left = [4, 5, 6, 10, 11, 12]\njoints_right = [1, 2, 3, 13, 14, 15]\n \nskeleton_3DPW_joints_group = [[2, 3], [5, 6], [1, 4], [0, 7], [12, 13], [9, 10], [8, 11]]\n\nNAMES_3DPW = ['']*14\nNAMES_3DPW[0] = 'Hip'\nNAMES_3DPW[1] = 'RHip'\nNAMES_3DPW[2] = 'RKnee'\nNAMES_3DPW[3] = 'RAnkle'\nNAMES_3DPW[4] = 'LHip'\nNAMES_3DPW[5] = 'LKnee'\nNAMES_3DPW[6] = 'LAnkle'\n# NAMES_3DPW[7] = 'Spine2'\nNAMES_3DPW[7] = 'Neck'\n# NAMES_3DPW[8] = 'Head'\nNAMES_3DPW[8] = 'LUpperArm'\nNAMES_3DPW[9] = 'LElbow'\nNAMES_3DPW[10] = 'LWrist'\nNAMES_3DPW[11] = 'RUpperArm'\nNAMES_3DPW[12] = 'RElbow'\nNAMES_3DPW[13] = 'RWrist'\n\n# Human3.6m IDs for training and testing\nTRAIN_SUBJECTS = ['S0']\nTEST_SUBJECTS = ['S0']\n\nclass TDPWDataset(object):\n\n def __init__(self, path, center_2d=False, load_metrics=None, skel_norm=False):\n # TODO: Update the fps here if needed\n super(TDPWDataset, self).__init__()\n\n # TODO: Update camera later if needed\n self.cameras = None\n\n self._data_train = { \"2d\": np.zeros((0, 14, 2), dtype=np.float32), \"3d\": np.zeros((0, 14, 3), dtype=np.float32), \"axes\": [] }\n self._data_valid = { \"2d\": np.zeros((0, 14, 2), dtype=np.float32), \"3d\": np.zeros((0, 14, 3), dtype=np.float32), \"axes\": [] }\n\n self.mean_2d = 0.0\n self.std_2d = 0.0\n self.mean_3d = 0.0\n self.std_3d = 0.0\n\n self.center_2d = center_2d\n\n self.skel_norm = skel_norm\n\n self.cameras = []\n\n self.load_data(path)\n\n def load_data(self, path, load_metrics=None):\n filename = os.path.splitext(os.path.basename(path))[0]\n\n indices_to_select = [0, 2, 5, 8, 1, 4, 7, 12, 16, 18, 20, 17, 19, 21]\n\n data = np.load(path, allow_pickle=True, encoding='latin1')['data'].item()\n\n data_train = data['train']\n data_valid = data['test']\n\n self._data_train['2d'] = data_train[\"combined_2d\"][:, indices_to_select, :]\n self._data_train['3d'] = data_train[\"combined_3d_cam\"][:, indices_to_select, :]*1000\n\n self._data_valid['2d'] = data_valid[\"combined_2d\"][:, indices_to_select, :]\n self._data_valid['3d'] = data_valid[\"combined_3d_cam\"][:, indices_to_select, :]*1000\n\n self._data_train['3d'] -= self._data_train['3d'][:, :1, :]\n self._data_valid['3d'] -= self._data_valid['3d'][:, :1, :]\n \n if self.center_2d:\n self._data_train['2d'] -= self._data_train['2d'][:, :1, :]\n self._data_valid['2d'] -= self._data_valid['2d'][:, :1, :]\n\n _, _, _, self._data_train['axes'] = get_body_centered_axes(self._data_train['3d'])\n _, _, _, self._data_valid['axes'] = get_body_centered_axes(self._data_valid['3d'])\n\n if self.skel_norm:\n self._data_train['2d'] = normalize_skeleton(self._data_train['2d'])\n self._data_valid['2d'] = normalize_skeleton(self._data_valid['2d'])\n\n # self.plot_random()\n\n self.mean_3d = np.mean(self._data_train['3d'], axis=0)\n self.std_3d = np.std(self._data_train['3d'], axis=0)\n \n self._data_train['3d'] = normalize_zscore(self._data_train['3d'], self.mean_3d, self.std_3d, skip_root=True)\n self._data_valid['3d'] = normalize_zscore(self._data_valid['3d'], self.mean_3d, self.std_3d, skip_root=True)\n\n if not self.skel_norm:\n self.mean_2d = np.mean(self._data_train['2d'], axis=0)\n self.std_2d = np.std(self._data_train['2d'], axis=0)\n self._data_train['2d'] = normalize_zscore(self._data_train['2d'], self.mean_2d, self.std_2d, 
skip_root=self.center_2d)\n self._data_valid['2d'] = normalize_zscore(self._data_valid['2d'], self.mean_2d, self.std_2d, skip_root=self.center_2d)\n\n def define_actions(self, action=None):\n all_actions = [\"N\"]\n\n if action is None:\n return all_actions\n\n if action not in all_actions:\n raise (ValueError, \"Undefined action: {}\".format(action))\n\n return [action]\n\n def get_2d_valid(self):\n return [self._data_valid['2d'].reshape((-1, 14, 2))]\n\n def get_3d_valid(self):\n return [self._data_valid['3d'].reshape((-1, 14, 3))]\n \n def get_2d_train(self):\n return [self._data_train['2d'].reshape((-1, 14, 2))]\n\n def get_3d_train(self):\n return [self._data_train['3d'].reshape((-1, 14, 3))]\n\n def get_axes_train(self):\n return [self._data_train['axes'][:, :, :2]]\n\n def get_axes_valid(self):\n return [self._data_valid['axes'][:, :, :2]]\n\n def get_joints_group(self):\n return skeleton_3DPW_joints_group\n\n def plot_random(self):\n\n idx = np.random.randint(0, high=self._data_train['3d'].shape[0])\n\n fig = plt.figure(figsize=(12, 6))\n ax = fig.add_subplot(121, projection='3d')\n bx = fig.add_subplot(122)\n draw_skeleton(self._data_train['3d'][idx, :, :]/1000, ax)\n draw_skeleton(self._data_train['2d'][idx, :, :], bx)\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n ax.set_xlim((-1, 1))\n ax.set_ylim((-1, 1))\n ax.set_zlim((-1, 1))\n bx.set_xlim((-960, 960))\n bx.set_ylim((960, -960))\n plt.show()", "step-ids": [ 5, 8, 10, 11, 15 ] }
[ 5, 8, 10, 11, 15 ]
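The loader in the record above centres every pose on its root joint and then applies per-joint z-score normalization through poseutils. As a rough illustration of that normalization step (a minimal NumPy sketch only; the real poseutils.transform.normalize_zscore and its skip_root handling may differ):

import numpy as np

def zscore_normalize(data, mean, std, skip_root=False, eps=1e-8):
    # Per-joint z-score for (N, J, D) pose arrays; eps guards zero variance.
    out = (data - mean) / (std + eps)
    if skip_root:
        # Leave the (already root-centred) first joint untouched.
        out[:, :1, :] = data[:, :1, :]
    return out

# Example with train-set statistics, mirroring how the loader above uses it.
poses = np.random.randn(8, 14, 3).astype(np.float32)
mean, std = poses.mean(axis=0), poses.std(axis=0)
normed = zscore_normalize(poses, mean, std, skip_root=True)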
# -*- coding: utf-8 -*-
class Task:
    def __init__(self):
        self.title = ''
        self.subtasks = []

    def set_title(self, title):
        self.title = title

    def set_subtasks(self, subtasks):
        self.subtasks = subtasks
normal
{ "blob_id": "3cf2ffbc8163c2a447016c93ff4dd13e410fff2b", "index": 7353, "step-1": "<mask token>\n", "step-2": "class Task:\n <mask token>\n <mask token>\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n", "step-3": "class Task:\n\n def __init__(self):\n self.title = ''\n self.subtasks = []\n <mask token>\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n", "step-4": "class Task:\n\n def __init__(self):\n self.title = ''\n self.subtasks = []\n\n def set_title(self, title):\n self.title = title\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n", "step-5": "# -*- coding: utf-8 -*-\nclass Task:\n def __init__(self):\n self.title = ''\n self.subtasks = []\n\n def set_title(self, title):\n self.title = title\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
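For reference, the final step of the row above is a plain mutable container; a minimal usage sketch (not part of the record):

task = Task()
task.set_title('Write release notes')
task.set_subtasks(['draft', 'review', 'publish'])
print(task.title, task.subtasks)  # Write release notes ['draft', 'review', 'publish']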
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def largo(l, n):
    i = 0
    cuenta = 1
    valor1 = 0
    valor2 = 0
    while cuenta < n + 1 or cuenta == n + 1:
        a = l[i]
        b = l[i + 1]
        if a == b:
            cuenta += 1
            valor1 = a
        i += 1
    cuenta = 1
    while cuenta < n or cuenta == n and i < len(l) - 1:
        c = l[i]
        d = l[i + 1]
        if c == d:
            cuenta += 1
            valor2 = c
        i += 1
    alto = abs(valor1 - valor2)
    return alto


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def largo(l, n):
    i = 0
    cuenta = 1
    valor1 = 0
    valor2 = 0
    while cuenta < n + 1 or cuenta == n + 1:
        a = l[i]
        b = l[i + 1]
        if a == b:
            cuenta += 1
            valor1 = a
        i += 1
    cuenta = 1
    while cuenta < n or cuenta == n and i < len(l) - 1:
        c = l[i]
        d = l[i + 1]
        if c == d:
            cuenta += 1
            valor2 = c
        i += 1
    alto = abs(valor1 - valor2)
    return alto


def hayBorde(l, n, h):
    if largo(l, n) == h:
        return True
    else:
        return False


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def largo(l, n):
    i = 0
    cuenta = 1
    valor1 = 0
    valor2 = 0
    while cuenta < n + 1 or cuenta == n + 1:
        a = l[i]
        b = l[i + 1]
        if a == b:
            cuenta += 1
            valor1 = a
        i += 1
    cuenta = 1
    while cuenta < n or cuenta == n and i < len(l) - 1:
        c = l[i]
        d = l[i + 1]
        if c == d:
            cuenta += 1
            valor2 = c
        i += 1
    alto = abs(valor1 - valor2)
    return alto


def hayBorde(l, n, h):
    if largo(l, n) == h:
        return True
    else:
        return False


print(hayBorde([2, 4, 4, 4, 6, 6, 6, 10, 10], 2, 4))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Spyder editor

This is a temporary file.
"""

def largo (l, n):
    i=0
    cuenta=1
    valor1=0
    valor2=0
    while cuenta < n+1 or cuenta==n+1:
        a=l[i]
        b=l[i+1]
        if a==b:
            cuenta+= 1
            valor1=a
        i+=1
    cuenta=1
    while cuenta < n or cuenta == n and i<len(l)-1:
        c=l[i]
        d=l[i+1]
        if c==d:
            cuenta+= 1
            valor2=c
        i+=1
    alto=abs(valor1-valor2)
    return alto

def hayBorde(l,n,h):
    if largo(l,n)==h:
        return True
    else:
        return False

print(hayBorde([2,4,4,4,6,6,6,10,10],2,4))
flexible
{ "blob_id": "f3b697e20f60e51d80d655ddf4809aa9afdfcd69", "index": 7495, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef largo(l, n):\n i = 0\n cuenta = 1\n valor1 = 0\n valor2 = 0\n while cuenta < n + 1 or cuenta == n + 1:\n a = l[i]\n b = l[i + 1]\n if a == b:\n cuenta += 1\n valor1 = a\n i += 1\n cuenta = 1\n while cuenta < n or cuenta == n and i < len(l) - 1:\n c = l[i]\n d = l[i + 1]\n if c == d:\n cuenta += 1\n valor2 = c\n i += 1\n alto = abs(valor1 - valor2)\n return alto\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef largo(l, n):\n i = 0\n cuenta = 1\n valor1 = 0\n valor2 = 0\n while cuenta < n + 1 or cuenta == n + 1:\n a = l[i]\n b = l[i + 1]\n if a == b:\n cuenta += 1\n valor1 = a\n i += 1\n cuenta = 1\n while cuenta < n or cuenta == n and i < len(l) - 1:\n c = l[i]\n d = l[i + 1]\n if c == d:\n cuenta += 1\n valor2 = c\n i += 1\n alto = abs(valor1 - valor2)\n return alto\n\n\ndef hayBorde(l, n, h):\n if largo(l, n) == h:\n return True\n else:\n return False\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef largo(l, n):\n i = 0\n cuenta = 1\n valor1 = 0\n valor2 = 0\n while cuenta < n + 1 or cuenta == n + 1:\n a = l[i]\n b = l[i + 1]\n if a == b:\n cuenta += 1\n valor1 = a\n i += 1\n cuenta = 1\n while cuenta < n or cuenta == n and i < len(l) - 1:\n c = l[i]\n d = l[i + 1]\n if c == d:\n cuenta += 1\n valor2 = c\n i += 1\n alto = abs(valor1 - valor2)\n return alto\n\n\ndef hayBorde(l, n, h):\n if largo(l, n) == h:\n return True\n else:\n return False\n\n\nprint(hayBorde([2, 4, 4, 4, 6, 6, 6, 10, 10], 2, 4))\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nEditor de Spyder\n\nEste es un archivo temporal.\n\"\"\"\n\ndef largo (l, n):\n i=0\n cuenta=1\n valor1=0\n valor2=0\n while cuenta < n+1 or cuenta==n+1:\n a=l[i]\n b=l[i+1]\n if a==b:\n cuenta+= 1\n valor1=a\n i+=1\n cuenta=1\n while cuenta < n or cuenta == n and i<len(l)-1:\n c=l[i]\n d=l[i+1]\n if c==d:\n cuenta+= 1\n valor2=c\n i+=1\n alto=abs(valor1-valor2)\n return alto\n\ndef hayBorde(l,n,h):\n if largo(l,n)==h:\n return True\n else:\n return False\n\nprint(hayBorde([2,4,4,4,6,6,6,10,10],2,4))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
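The hand-rolled pair counting in largo() above is easier to reason about as run-length encoding. A sketch of that idea (deliberately not a behavioural replica of largo, whose counters carry over between its two loops):

from itertools import groupby

def run_lengths(values):
    # Run-length encode a sequence into [(value, length), ...].
    return [(v, sum(1 for _ in g)) for v, g in groupby(values)]

print(run_lengths([2, 4, 4, 4, 6, 6, 6, 10, 10]))
# [(2, 1), (4, 3), (6, 3), (10, 2)]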
# Generated by Django 3.1 on 2020-08-28 14:03

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('api_rest', '0004_auto_20200828_0749'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='event',
            name='user_id',
        ),
        migrations.AddField(
            model_name='event',
            name='users',
            field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),
        ),
    ]
normal
{ "blob_id": "bfd8385e8f4886b91dde59c04785134b9cd6a2b6", "index": 3893, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api_rest', '0004_auto_20200828_0749')]\n operations = [migrations.RemoveField(model_name='event', name='user_id'\n ), migrations.AddField(model_name='event', name='users', field=\n models.ManyToManyField(db_table='user_event', related_name='users',\n to='api_rest.UserE'))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api_rest', '0004_auto_20200828_0749')]\n operations = [migrations.RemoveField(model_name='event', name='user_id'\n ), migrations.AddField(model_name='event', name='users', field=\n models.ManyToManyField(db_table='user_event', related_name='users',\n to='api_rest.UserE'))]\n", "step-5": "# Generated by Django 3.1 on 2020-08-28 14:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api_rest', '0004_auto_20200828_0749'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='event',\n name='user_id',\n ),\n migrations.AddField(\n model_name='event',\n name='users',\n field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
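The migration above replaces a user_id foreign key with a many-to-many link through an explicit user_event table. A hypothetical sketch of the resulting Event model (the real api_rest/models.py is not part of the record):

from django.db import models

class Event(models.Model):
    # Hypothetical reconstruction from the migration's AddField operation.
    users = models.ManyToManyField('api_rest.UserE', related_name='users',
                                   db_table='user_event')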
from .base import *  # noqa
from .base import env

# example: https://github.com/pydanny/cookiecutter-django/blob/master/%7B%7Bcookiecutter.project_slug%7D%7D/config/settings/production.py

# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]


# https://docs.djangoproject.com/en/dev/ref/settings/#admins
# Who to send emails to when errors arise
ADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]

# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL")  # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)  # noqa F405

# CACHES
# ------------------------------------------------------------------------------
# with cPanel, see what can be configured with xtremcache/varnish and django-varnish
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
        "LOCATION": "",
    },
    # "default": {
    #     "BACKEND": "django_redis.cache.RedisCache",
    #     "LOCATION": env("XTREM_CACHE_URL"),
    #     "OPTIONS": {
    #         "CLIENT_CLASS": "django_redis.client.DefaultClient",
    #         # Mimicking memcache behavior.
    #         # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
    #         "IGNORE_EXCEPTIONS": True,
    #     },
    # }
}

# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
normal
{ "blob_id": "836df02495ee581f138050be6b7a7a076ea899eb", "index": 4966, "step-1": "<mask token>\n", "step-2": "<mask token>\nSECRET_KEY = env('DJANGO_SECRET_KEY')\nALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]\nADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]\nDATABASES['default'] = env.db('DATABASE_URL')\nDATABASES['default']['ATOMIC_REQUESTS'] = True\nDATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)\nCACHES = {'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nSECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)\nSECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF',\n default=True)\n", "step-3": "from .base import *\nfrom .base import env\nSECRET_KEY = env('DJANGO_SECRET_KEY')\nALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]\nADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]\nDATABASES['default'] = env.db('DATABASE_URL')\nDATABASES['default']['ATOMIC_REQUESTS'] = True\nDATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)\nCACHES = {'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nSECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)\nSECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF',\n default=True)\n", "step-4": "from .base import * # noqa\nfrom .base import env\n\n# exemple https://github.com/pydanny/cookiecutter-django/blob/master/%7B%7Bcookiecutter.project_slug%7D%7D/config/settings/production.py\n\n# GENERAL\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]\n\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# Who to sent emails when errors arise\nADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]\n\n# DATABASES\n# ------------------------------------------------------------------------------\nDATABASES[\"default\"] = env.db(\"DATABASE_URL\") # noqa F405\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True # noqa F405\nDATABASES[\"default\"][\"CONN_MAX_AGE\"] = env.int(\"CONN_MAX_AGE\", default=60) # noqa F405\n\n# CACHES\n# ------------------------------------------------------------------------------\n# avec cpanel à voir ce que l'on peut configurer avec xtremcache/varnish et django-varnish\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"\",\n },\n # \"default\": {\n # \"BACKEND\": \"django_redis.cache.RedisCache\",\n # \"LOCATION\": env(\"XTREM_CACHE_URL\"),\n # \"OPTIONS\": {\n # \"CLIENT_CLASS\": 
\"django_redis.client.DefaultClient\",\n # # Mimicing memcache behavior.\n # # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior\n # \"IGNORE_EXCEPTIONS\": True,\n # },\n # }\n}\n\n# SECURITY\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True)\n# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure\nSESSION_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure\nCSRF_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds\n# TODO: set this to 60 seconds first and then to 518400 once you prove the former works\nSECURE_HSTS_SECONDS = 60\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n \"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True\n)\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload\nSECURE_HSTS_PRELOAD = env.bool(\"DJANGO_SECURE_HSTS_PRELOAD\", default=True)\n# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\n \"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True\n)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
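These settings read everything through django-environ's env helper imported from base.py. A minimal sketch of the bootstrap such a base module typically contains (illustrative only; the row's actual base.py is not shown):

import environ

env = environ.Env()
environ.Env.read_env()  # load a .env file if one is present

SECRET_KEY = env('DJANGO_SECRET_KEY')
DEBUG = env.bool('DJANGO_DEBUG', default=False)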
<|reserved_special_token_0|>


class TestPanel(wx.Panel):

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)
        b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',
            (50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)
        self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',
            'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',
            'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',
            'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',
            'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',
            'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),
            size=(220, 160), style=wx.LB_SINGLE)
        self.list.Select(0)
        tt = 'Timeout in milliseconds\n0 is system default'
        self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.
            ALIGN_LEFT)
        self.spin.SetToolTip(wx.ToolTip(tt))
        self.spin.SetRange(0, 5000)
        self.spin.SetValue(0)

    def OnButton1(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.Show(True)
    <|reserved_special_token_0|>


class printLog:

    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        print('%s' % txt)


class TestFrame(wx.Frame):

    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.
        DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
        name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
        log = printLog()
        panel = TestPanel(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)
        try:
            self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))
        except Exception as exc:
            raise exc

    def OnDestroy(self, event):
        self.Destroy()


class TestApp(wx.App):

    def OnInit(self):
        gMainWin = TestFrame(None)
        gMainWin.SetTitle('Extended Frame Demo')
        gMainWin.Show()
        return True


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class MyMiniFrame(wx.MiniFrame):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>


class TestPanel(wx.Panel):

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)
        b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',
            (50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)
        self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',
            'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',
            'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',
            'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',
            'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',
            'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),
            size=(220, 160), style=wx.LB_SINGLE)
        self.list.Select(0)
        tt = 'Timeout in milliseconds\n0 is system default'
        self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.
            ALIGN_LEFT)
        self.spin.SetToolTip(wx.ToolTip(tt))
        self.spin.SetRange(0, 5000)
        self.spin.SetValue(0)

    def OnButton1(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.Show(True)

    def OnButton2(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.ShowWithEffect(effect=eval(self.list.GetString(self.list.
            GetSelection())), timeout=self.spin.GetValue())


class printLog:

    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        print('%s' % txt)


class TestFrame(wx.Frame):

    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.
        DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
        name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
        log = printLog()
        panel = TestPanel(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)
        try:
            self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))
        except Exception as exc:
            raise exc

    def OnDestroy(self, event):
        self.Destroy()


class TestApp(wx.App):

    def OnInit(self):
        gMainWin = TestFrame(None)
        gMainWin.SetTitle('Extended Frame Demo')
        gMainWin.Show()
        return True


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class MyMiniFrame(wx.MiniFrame):

    def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.
        DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'):
        wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)
        panel = wx.Panel(self, -1)
        button = wx.Button(panel, 1003, 'Close Me')
        button.SetPosition((15, 15))
        button2 = wx.Button(panel, -1, 'ToggleWindowStyle(wx.STAY_ON_TOP)')
        button2.SetPosition((30, 50))
        self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)
        self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)

    def OnToggleWindowStyle(self, event):
        self.ToggleWindowStyle(wx.STAY_ON_TOP)
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>


class TestPanel(wx.Panel):

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)
        b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',
            (50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)
        self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',
            'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',
            'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',
            'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',
            'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',
            'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),
            size=(220, 160), style=wx.LB_SINGLE)
        self.list.Select(0)
        tt = 'Timeout in milliseconds\n0 is system default'
        self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.
            ALIGN_LEFT)
        self.spin.SetToolTip(wx.ToolTip(tt))
        self.spin.SetRange(0, 5000)
        self.spin.SetValue(0)

    def OnButton1(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.Show(True)

    def OnButton2(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.ShowWithEffect(effect=eval(self.list.GetString(self.list.
            GetSelection())), timeout=self.spin.GetValue())


class printLog:

    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        print('%s' % txt)


class TestFrame(wx.Frame):

    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.
        DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
        name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
        log = printLog()
        panel = TestPanel(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)
        try:
            self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))
        except Exception as exc:
            raise exc

    def OnDestroy(self, event):
        self.Destroy()


class TestApp(wx.App):

    def OnInit(self):
        gMainWin = TestFrame(None)
        gMainWin.SetTitle('Extended Frame Demo')
        gMainWin.Show()
        return True


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
    gFileDir = os.path.dirname(os.path.abspath(__file__))
except:
    gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))
<|reserved_special_token_0|>


class MyMiniFrame(wx.MiniFrame):

    def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.
        DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'):
        wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)
        panel = wx.Panel(self, -1)
        button = wx.Button(panel, 1003, 'Close Me')
        button.SetPosition((15, 15))
        button2 = wx.Button(panel, -1, 'ToggleWindowStyle(wx.STAY_ON_TOP)')
        button2.SetPosition((30, 50))
        self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)
        self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)

    def OnToggleWindowStyle(self, event):
        self.ToggleWindowStyle(wx.STAY_ON_TOP)

    def OnCloseMe(self, event):
        self.Close(True)

    def OnCloseWindow(self, event):
        self.Destroy()


class TestPanel(wx.Panel):

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)
        b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',
            (50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)
        self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',
            'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',
            'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',
            'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',
            'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',
            'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),
            size=(220, 160), style=wx.LB_SINGLE)
        self.list.Select(0)
        tt = 'Timeout in milliseconds\n0 is system default'
        self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.
            ALIGN_LEFT)
        self.spin.SetToolTip(wx.ToolTip(tt))
        self.spin.SetRange(0, 5000)
        self.spin.SetValue(0)

    def OnButton1(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.Show(True)

    def OnButton2(self, evt):
        win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350,
            200), style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.ShowWithEffect(effect=eval(self.list.GetString(self.list.
            GetSelection())), timeout=self.spin.GetValue())


class printLog:

    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        print('%s' % txt)


class TestFrame(wx.Frame):

    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.
        DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
        name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)
        log = printLog()
        panel = TestPanel(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)
        try:
            self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))
        except Exception as exc:
            raise exc

    def OnDestroy(self, event):
        self.Destroy()


class TestApp(wx.App):

    def OnInit(self):
        gMainWin = TestFrame(None)
        gMainWin.SetTitle('Extended Frame Demo')
        gMainWin.Show()
        return True


if __name__ == '__main__':
    import sys
    print('Python %s.%s.%s %s' % sys.version_info[0:4])
    print('wxPython %s' % wx.version())
    gApp = TestApp(redirect=False, filename=None, useBestVisual=False,
        clearSigInt=True)
    gApp.MainLoop()
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__doc__ = """\
A MiniFrame is a Frame with a small title bar. It is suitable for floating
toolbars that must not take up too much screen area. In other respects, it's the
same as a wx.Frame.
"""

__wxPyOnlineDocs__ = 'https://wxpython.org/Phoenix/docs/html/wx.MiniFrame.html'
__wxPyDemoPanel__ = 'TestPanel'

#-Imports-----------------------------------------------------------------------

#--Python Imports.
import os
import sys

#--wxPython Imports.
import wx


#-Globals-----------------------------------------------------------------------
try:
    gFileDir = os.path.dirname(os.path.abspath(__file__))
except:
    gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))
gBmpDir = gFileDir + os.sep + 'bitmaps'


class MyMiniFrame(wx.MiniFrame):
    def __init__(self, parent, id, title, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
                 name='frame'):

        wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)
        panel = wx.Panel(self, -1)

        button = wx.Button(panel, 1003, "Close Me")
        button.SetPosition((15, 15))

        button2 = wx.Button(panel, -1, "ToggleWindowStyle(wx.STAY_ON_TOP)")
        button2.SetPosition((30, 50))

        self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)
        self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)


    def OnToggleWindowStyle(self, event):
        self.ToggleWindowStyle(wx.STAY_ON_TOP)

    def OnCloseMe(self, event):
        self.Close(True)

    def OnCloseWindow(self, event):
        self.Destroy()

#---------------------------------------------------------------------------

class TestPanel(wx.Panel):
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        b1 = wx.Button(self, -1, "Create and Show a MiniFrame", (50, 50))
        self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)

        b2 = wx.Button(self, -1, "Create and Show a MiniFrame With Effect", (50, 100))
        self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)

        self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',
                                              'wx.SHOW_EFFECT_ROLL_TO_LEFT',
                                              'wx.SHOW_EFFECT_ROLL_TO_RIGHT',
                                              'wx.SHOW_EFFECT_ROLL_TO_TOP',
                                              'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',
                                              'wx.SHOW_EFFECT_SLIDE_TO_LEFT',
                                              'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',
                                              'wx.SHOW_EFFECT_SLIDE_TO_TOP',
                                              'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',
                                              'wx.SHOW_EFFECT_BLEND',
                                              'wx.SHOW_EFFECT_EXPAND'
                                              # 'wx.SHOW_EFFECT_MAX'
                                              ],
                               pos=(50, 155), size=(220, 160),
                               style=wx.LB_SINGLE)
        self.list.Select(0)

        tt = "Timeout in milliseconds\n0 is system default"
        self.spin = wx.SpinCtrl(self, -1, tt,
                                pos=(50, 130), style=wx.ALIGN_LEFT)
        self.spin.SetToolTip(wx.ToolTip(tt))
        self.spin.SetRange(0, 5000)
        self.spin.SetValue(0)

    def OnButton1(self, evt):
        win = MyMiniFrame(self, -1, "This is a wx.MiniFrame", size=(350, 200),
                          style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.Show(True)

    def OnButton2(self, evt):
        win = MyMiniFrame(self, -1, "This is a wx.MiniFrame", size=(350, 200),
                          style=wx.DEFAULT_FRAME_STYLE)
        win.Centre()
        win.ShowWithEffect(effect=eval(self.list.GetString(self.list.GetSelection())),
                           timeout=self.spin.GetValue())


#- __main__ Demo ---------------------------------------------------------------

class printLog:
    def __init__(self):
        pass

    def write(self, txt):
        print('%s' % txt)

    def WriteText(self, txt):
        print('%s' % txt)


class TestFrame(wx.Frame):
    def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=wx.DEFAULT_FRAME_STYLE, name='frame'):
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name)

        log = printLog()

        panel = TestPanel(self, log)
        self.Bind(wx.EVT_CLOSE, self.OnDestroy)

        try:
            self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))
        except Exception as exc:
            raise exc

    def OnDestroy(self, event):
        self.Destroy()


class TestApp(wx.App):
    def OnInit(self):
        gMainWin = TestFrame(None)
        gMainWin.SetTitle('Extended Frame Demo')
        gMainWin.Show()

        return True

#---------------------------------------------------------------------------


if __name__ == '__main__':
    import sys
    print('Python %s.%s.%s %s' % sys.version_info[0:4])
    print('wxPython %s' % wx.version())
    gApp = TestApp(redirect=False,
                   filename=None,
                   useBestVisual=False,
                   clearSigInt=True)

    gApp.MainLoop()
flexible
{ "blob_id": "b041e9577af72d2bcee3dda0cc78fa12800d53bd", "index": 2286, "step-1": "<mask token>\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n <mask token>\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass MyMiniFrame(wx.MiniFrame):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = 
MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.\n GetSelection())), timeout=self.spin.GetValue())\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass MyMiniFrame(wx.MiniFrame):\n\n def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.\n DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'):\n wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)\n panel = wx.Panel(self, -1)\n button = wx.Button(panel, 1003, 'Close Me')\n button.SetPosition((15, 15))\n button2 = wx.Button(panel, -1, 'ToggleWindowStyle(wx.STAY_ON_TOP)')\n button2.SetPosition((30, 50))\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n\n def OnToggleWindowStyle(self, event):\n self.ToggleWindowStyle(wx.STAY_ON_TOP)\n <mask token>\n <mask token>\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.\n GetSelection())), timeout=self.spin.GetValue())\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, 
pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\n<mask token>\n", "step-4": "<mask token>\ntry:\n gFileDir = os.path.dirname(os.path.abspath(__file__))\nexcept:\n gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))\n<mask token>\n\n\nclass MyMiniFrame(wx.MiniFrame):\n\n def __init__(self, parent, id, title, pos=wx.DefaultPosition, size=wx.\n DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name='frame'):\n wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)\n panel = wx.Panel(self, -1)\n button = wx.Button(panel, 1003, 'Close Me')\n button.SetPosition((15, 15))\n button2 = wx.Button(panel, -1, 'ToggleWindowStyle(wx.STAY_ON_TOP)')\n button2.SetPosition((30, 50))\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n\n def OnToggleWindowStyle(self, event):\n self.ToggleWindowStyle(wx.STAY_ON_TOP)\n\n def OnCloseMe(self, event):\n self.Close(True)\n\n def OnCloseWindow(self, event):\n self.Destroy()\n\n\nclass TestPanel(wx.Panel):\n\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n b1 = wx.Button(self, -1, 'Create and Show a MiniFrame', (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n b2 = wx.Button(self, -1, 'Create and Show a MiniFrame With Effect',\n (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT', 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP', 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT', 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP', 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND', 'wx.SHOW_EFFECT_EXPAND'], pos=(50, 155),\n size=(220, 160), style=wx.LB_SINGLE)\n self.list.Select(0)\n tt = 'Timeout in milliseconds\\n0 is system default'\n self.spin = wx.SpinCtrl(self, -1, tt, pos=(50, 130), style=wx.\n ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, evt):\n win = MyMiniFrame(self, -1, 'This is a wx.MiniFrame', size=(350, \n 200), style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.\n GetSelection())), timeout=self.spin.GetValue())\n\n\nclass printLog:\n\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.\n DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n log = printLog()\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n 
try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n return True\n\n\nif __name__ == '__main__':\n import sys\n print('Python %s.%s.%s %s' % sys.version_info[0:4])\n print('wxPython %s' % wx.version())\n gApp = TestApp(redirect=False, filename=None, useBestVisual=False,\n clearSigInt=True)\n gApp.MainLoop()\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__doc__ = \"\"\"\\\nA MiniFrame is a Frame with a small title bar. It is suitable for floating\ntoolbars that must not take up too much screen area. In other respects, it's the\nsame as a wx.Frame.\n\"\"\"\n\n__wxPyOnlineDocs__ = 'https://wxpython.org/Phoenix/docs/html/wx.MiniFrame.html'\n__wxPyDemoPanel__ = 'TestPanel'\n\n#-Imports-----------------------------------------------------------------------\n\n#--Python Imports.\nimport os\nimport sys\n\n#--wxPython Imports.\nimport wx\n\n\n#-Globals-----------------------------------------------------------------------\ntry:\n gFileDir = os.path.dirname(os.path.abspath(__file__))\nexcept:\n gFileDir = os.path.dirname(os.path.abspath(sys.argv[0]))\ngBmpDir = gFileDir + os.sep + 'bitmaps'\n\n\nclass MyMiniFrame(wx.MiniFrame):\n def __init__(self, parent, id, title, pos=wx.DefaultPosition,\n size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,\n name='frame'):\n\n wx.MiniFrame.__init__(self, parent, id, title, pos, size, style, name)\n panel = wx.Panel(self, -1)\n\n button = wx.Button(panel, 1003, \"Close Me\")\n button.SetPosition((15, 15))\n\n button2 = wx.Button(panel, -1, \"ToggleWindowStyle(wx.STAY_ON_TOP)\")\n button2.SetPosition((30, 50))\n\n self.Bind(wx.EVT_BUTTON, self.OnCloseMe, button)\n self.Bind(wx.EVT_BUTTON, self.OnToggleWindowStyle, button2)\n self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)\n\n\n def OnToggleWindowStyle(self, event):\n self.ToggleWindowStyle(wx.STAY_ON_TOP)\n\n def OnCloseMe(self, event):\n self.Close(True)\n\n def OnCloseWindow(self, event):\n self.Destroy()\n\n#---------------------------------------------------------------------------\n\nclass TestPanel(wx.Panel):\n def __init__(self, parent, log):\n self.log = log\n wx.Panel.__init__(self, parent, -1)\n\n b1 = wx.Button(self, -1, \"Create and Show a MiniFrame\", (50, 50))\n self.Bind(wx.EVT_BUTTON, self.OnButton1, b1)\n\n b2 = wx.Button(self, -1, \"Create and Show a MiniFrame With Effect\", (50, 100))\n self.Bind(wx.EVT_BUTTON, self.OnButton2, b2)\n\n self.list = wx.ListBox(self, choices=['wx.SHOW_EFFECT_NONE',\n 'wx.SHOW_EFFECT_ROLL_TO_LEFT',\n 'wx.SHOW_EFFECT_ROLL_TO_RIGHT',\n 'wx.SHOW_EFFECT_ROLL_TO_TOP',\n 'wx.SHOW_EFFECT_ROLL_TO_BOTTOM',\n 'wx.SHOW_EFFECT_SLIDE_TO_LEFT',\n 'wx.SHOW_EFFECT_SLIDE_TO_RIGHT',\n 'wx.SHOW_EFFECT_SLIDE_TO_TOP',\n 'wx.SHOW_EFFECT_SLIDE_TO_BOTTOM',\n 'wx.SHOW_EFFECT_BLEND',\n 'wx.SHOW_EFFECT_EXPAND'\n # 'wx.SHOW_EFFECT_MAX'\n ],\n pos=(50, 155), size=(220, 160),\n style=wx.LB_SINGLE)\n self.list.Select(0)\n\n tt = \"Timeout in milliseconds\\n0 is system default\"\n self.spin = wx.SpinCtrl(self, -1, tt,\n pos=(50, 130), style=wx.ALIGN_LEFT)\n self.spin.SetToolTip(wx.ToolTip(tt))\n self.spin.SetRange(0, 5000)\n self.spin.SetValue(0)\n\n def OnButton1(self, evt):\n win = MyMiniFrame(self, -1, \"This is a wx.MiniFrame\", size=(350, 200),\n style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.Show(True)\n\n def OnButton2(self, 
evt):\n win = MyMiniFrame(self, -1, \"This is a wx.MiniFrame\", size=(350, 200),\n style=wx.DEFAULT_FRAME_STYLE)\n win.Centre()\n win.ShowWithEffect(effect=eval(self.list.GetString(self.list.GetSelection())),\n timeout=self.spin.GetValue())\n\n\n#- __main__ Demo ---------------------------------------------------------------\n\nclass printLog:\n def __init__(self):\n pass\n\n def write(self, txt):\n print('%s' % txt)\n\n def WriteText(self, txt):\n print('%s' % txt)\n\n\nclass TestFrame(wx.Frame):\n def __init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString,\n pos=wx.DefaultPosition, size=wx.DefaultSize,\n style=wx.DEFAULT_FRAME_STYLE, name='frame'):\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name)\n\n log = printLog()\n\n panel = TestPanel(self, log)\n self.Bind(wx.EVT_CLOSE, self.OnDestroy)\n\n try:\n self.SetIcon(wx.IconFromLocation(wx.IconLocation(sys.executable)))\n except Exception as exc:\n raise exc\n\n def OnDestroy(self, event):\n self.Destroy()\n\n\nclass TestApp(wx.App):\n def OnInit(self):\n gMainWin = TestFrame(None)\n gMainWin.SetTitle('Extended Frame Demo')\n gMainWin.Show()\n\n return True\n\n#---------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n import sys\n print('Python %s.%s.%s %s' % sys.version_info[0:4])\n print('wxPython %s' % wx.version())\n gApp = TestApp(redirect=False,\n filename=None,\n useBestVisual=False,\n clearSigInt=True)\n\n gApp.MainLoop()\n", "step-ids": [ 12, 14, 16, 19, 22 ] }
[ 12, 14, 16, 19, 22 ]
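A minimal standalone sketch of the ShowWithEffect call the demo above is built around; on ports that do not implement window effects, wxWidgets documents that it behaves like a plain Show:

import wx

app = wx.App(False)
frame = wx.MiniFrame(None, title='Effect demo', size=(300, 150))
frame.Centre()
frame.ShowWithEffect(wx.SHOW_EFFECT_BLEND, timeout=300)  # timeout in ms
app.MainLoop()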
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class CurriculoSerializer(serializers.ModelSerializer):


    class Meta:
        model = Curriculo
        fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'
<|reserved_special_token_1|>
from rest_framework import serializers
from core.models import Curriculo


class CurriculoSerializer(serializers.ModelSerializer):


    class Meta:
        model = Curriculo
        fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'
<|reserved_special_token_1|>
from rest_framework import serializers
from core.models import Curriculo

class CurriculoSerializer(serializers.ModelSerializer):
    class Meta:
        model = Curriculo
        fields = ('id','name', 'description','image','create_at','update_at')
flexible
{ "blob_id": "029f4f015f558dbd4d6096b00c53f5f0fe69883d", "index": 1322, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CurriculoSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Curriculo\n fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'\n", "step-3": "from rest_framework import serializers\nfrom core.models import Curriculo\n\n\nclass CurriculoSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Curriculo\n fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'\n", "step-4": "from rest_framework import serializers\nfrom core.models import Curriculo\n\nclass CurriculoSerializer(serializers.ModelSerializer):\n class Meta:\n model = Curriculo\n fields = ('id','name', 'description','image','create_at','update_at')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
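Illustrative usage of the serializer in the row's final step, assuming a configured Django project (the serializer import path here is hypothetical):

from core.models import Curriculo
from core.serializers import CurriculoSerializer  # hypothetical module path

obj = Curriculo.objects.first()
print(CurriculoSerializer(obj).data)
# e.g. {'id': 1, 'name': '...', 'description': '...', 'image': ..., 'create_at': ..., 'update_at': ...}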
# Import smtplib for the actual sending function
import smtplib

# Import the email modules we'll need
from email.message import EmailMessage

# Open the plain text file whose name is in textfile for reading.
with open("testfile.txt") as fp:
    # Create a text/plain message
    msg = EmailMessage()
    msg.set_content("test")

me = "njordan@kohanakai.com"
you = ['john.pelletier@ymail.com', 'jdp2766@gmail.com']
msg['Subject'] = 'The tester email'
msg['From'] = me
msg['To'] = you

# Send the message via our own SMTP server.
s = smtplib.SMTP('smtp-relay.gmail.com', 25)
s.send_message(msg)
s.quit()
normal
{ "blob_id": "9feb24da78113310509664fa9efcf5f399be5335", "index": 5914, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('testfile.txt') as fp:\n msg = EmailMessage()\n msg.set_content('test')\n<mask token>\ns.send_message(msg)\ns.quit()\n", "step-3": "<mask token>\nwith open('testfile.txt') as fp:\n msg = EmailMessage()\n msg.set_content('test')\nme = 'njordan@kohanakai.com'\nyou = ['john.pelletier@ymail.com', 'jdp2766@gmail.com']\nmsg['Subject'] = 'The tester email'\nmsg['From'] = me\nmsg['To'] = you\ns = smtplib.SMTP('smtp-relay.gmail.com', 25)\ns.send_message(msg)\ns.quit()\n", "step-4": "import smtplib\nfrom email.message import EmailMessage\nwith open('testfile.txt') as fp:\n msg = EmailMessage()\n msg.set_content('test')\nme = 'njordan@kohanakai.com'\nyou = ['john.pelletier@ymail.com', 'jdp2766@gmail.com']\nmsg['Subject'] = 'The tester email'\nmsg['From'] = me\nmsg['To'] = you\ns = smtplib.SMTP('smtp-relay.gmail.com', 25)\ns.send_message(msg)\ns.quit()\n", "step-5": "# Import smtplib for the actual sending function\nimport smtplib\n\n# Import the email modules we'll need\nfrom email.message import EmailMessage\n\n# Open the plain text file whose name is in textfile for reading.\nwith open(\"testfile.txt\") as fp:\n # Create a text/plain message\n msg = EmailMessage()\n msg.set_content(\"test\")\n\nme = \"njordan@kohanakai.com\"\nyou = ['john.pelletier@ymail.com', 'jdp2766@gmail.com']\nmsg['Subject'] = 'The tester email'\nmsg['From'] = me\nmsg['To'] = you\n\n# Send the message via our own SMTP server.\ns = smtplib.SMTP('smtp-relay.gmail.com', 25)\ns.send_message(msg)\ns.quit()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
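The script above opens testfile.txt but never reads it, so the message body is always the literal 'test'. A sketch of the likely intent, using the file contents as the body and closing the connection via a context manager:

import smtplib
from email.message import EmailMessage

with open('testfile.txt') as fp:
    msg = EmailMessage()
    msg.set_content(fp.read())  # body comes from the file this time

msg['Subject'] = 'The tester email'
msg['From'] = 'njordan@kohanakai.com'
msg['To'] = ['john.pelletier@ymail.com', 'jdp2766@gmail.com']

with smtplib.SMTP('smtp-relay.gmail.com', 25) as s:
    s.send_message(msg)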
""" This file goes through the data to find the frequencies of words in the corpus """ import csv import time, datetime import calendar from collections import defaultdict import chardet import re REVIEW_ID_COL = 0; USER_ID_COL = 1 BUSINESS_ID_COL = 2 STARS_COL = 3 DATE_COL = 4 TEXT_COL = 5 USEFUL_COL = 6 FUNNY_COL = 7 COOL_COL = 8 pattern = re.compile('\W') with open("yelp_review.csv", encoding="utf8") as csvfile: wordFrequencies = defaultdict(int) def beautifyDate(res): # This function returns a floating point that gives the UTC # print (res) dt = time.strptime(res, '%Y-%m-%d') return calendar.timegm(dt) def getAsciiFriendlyString(text, wordFrequencies): """ Things to note about the code: this code include punctuation and immediately adds non ASCII friendly into the <unk> pile """ strings = text.lower() strings = strings.split(" ") for wrd in strings: try: wrd = re.sub(pattern, '', wrd) #print (wrd) wrd.encode('ascii') wordFrequencies[wrd] += 1 except UnicodeEncodeError: #print (":( ", wrd) wordFrequencies["<unk>"] += 1 #getAsciiFriendlyString("mooing!@ cows are the best", wordFrequencies) #print (len(wordFrequencies)) #for wrd in wordFrequencies: #print (wrd, wordFrequencies[wrd]) #wrdFrqWriter.writerow([wrd]) toyTrain = open("100k_numDate_train.csv", 'w') toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator = '\n') print ("Creating List....") readCSV = list(csv.reader(csvfile, delimiter=',')) print ("Finished creating list....") print ("Number of examples:", len(readCSV)) excludeSet = {REVIEW_ID_COL}; fieldNames = readCSV[0] print(fieldNames) readForOneHot = readCSV[1:] print ("Going through the words for the frequencies.") # Go through the set, finding the frequencies for row in readForOneHot: getAsciiFriendlyString(row[TEXT_COL], wordFrequencies) print (len(readForOneHot)) # Write the frequencies to a file (so we don't have to do this again.....) print ("creating file with word frequencies") wrdFrq = open("yelp_word_frequencies.csv", 'w') wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL) wrdFrqWriter.writerow(["word", "frequency"]) for wrd in wordFrequencies: wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])
normal
{ "blob_id": "ba54b3a148a34ced74a337665ddd5f2d9084553b", "index": 1489, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('yelp_review.csv', encoding='utf8') as csvfile:\n wordFrequencies = defaultdict(int)\n\n def beautifyDate(res):\n dt = time.strptime(res, '%Y-%m-%d')\n return calendar.timegm(dt)\n\n def getAsciiFriendlyString(text, wordFrequencies):\n \"\"\"\n\t\tThings to note about the code: this code include punctuation and immediately adds non ASCII\n\t\tfriendly into the <unk> pile\n\t\t\"\"\"\n strings = text.lower()\n strings = strings.split(' ')\n for wrd in strings:\n try:\n wrd = re.sub(pattern, '', wrd)\n wrd.encode('ascii')\n wordFrequencies[wrd] += 1\n except UnicodeEncodeError:\n wordFrequencies['<unk>'] += 1\n toyTrain = open('100k_numDate_train.csv', 'w')\n toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=\n csv.QUOTE_MINIMAL, lineterminator='\\n')\n print('Creating List....')\n readCSV = list(csv.reader(csvfile, delimiter=','))\n print('Finished creating list....')\n print('Number of examples:', len(readCSV))\n excludeSet = {REVIEW_ID_COL}\n fieldNames = readCSV[0]\n print(fieldNames)\n readForOneHot = readCSV[1:]\n print('Going through the words for the frequencies.')\n for row in readForOneHot:\n getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)\n print(len(readForOneHot))\n print('creating file with word frequencies')\n wrdFrq = open('yelp_word_frequencies.csv', 'w')\n wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting\n =csv.QUOTE_MINIMAL)\n wrdFrqWriter.writerow(['word', 'frequency'])\n for wrd in wordFrequencies:\n wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])\n", "step-3": "<mask token>\nREVIEW_ID_COL = 0\nUSER_ID_COL = 1\nBUSINESS_ID_COL = 2\nSTARS_COL = 3\nDATE_COL = 4\nTEXT_COL = 5\nUSEFUL_COL = 6\nFUNNY_COL = 7\nCOOL_COL = 8\npattern = re.compile('\\\\W')\nwith open('yelp_review.csv', encoding='utf8') as csvfile:\n wordFrequencies = defaultdict(int)\n\n def beautifyDate(res):\n dt = time.strptime(res, '%Y-%m-%d')\n return calendar.timegm(dt)\n\n def getAsciiFriendlyString(text, wordFrequencies):\n \"\"\"\n\t\tThings to note about the code: this code include punctuation and immediately adds non ASCII\n\t\tfriendly into the <unk> pile\n\t\t\"\"\"\n strings = text.lower()\n strings = strings.split(' ')\n for wrd in strings:\n try:\n wrd = re.sub(pattern, '', wrd)\n wrd.encode('ascii')\n wordFrequencies[wrd] += 1\n except UnicodeEncodeError:\n wordFrequencies['<unk>'] += 1\n toyTrain = open('100k_numDate_train.csv', 'w')\n toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=\n csv.QUOTE_MINIMAL, lineterminator='\\n')\n print('Creating List....')\n readCSV = list(csv.reader(csvfile, delimiter=','))\n print('Finished creating list....')\n print('Number of examples:', len(readCSV))\n excludeSet = {REVIEW_ID_COL}\n fieldNames = readCSV[0]\n print(fieldNames)\n readForOneHot = readCSV[1:]\n print('Going through the words for the frequencies.')\n for row in readForOneHot:\n getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)\n print(len(readForOneHot))\n print('creating file with word frequencies')\n wrdFrq = open('yelp_word_frequencies.csv', 'w')\n wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting\n =csv.QUOTE_MINIMAL)\n wrdFrqWriter.writerow(['word', 'frequency'])\n for wrd in wordFrequencies:\n wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])\n", "step-4": "<mask token>\nimport csv\nimport time, datetime\nimport calendar\nfrom collections import 
defaultdict\nimport chardet\nimport re\nREVIEW_ID_COL = 0\nUSER_ID_COL = 1\nBUSINESS_ID_COL = 2\nSTARS_COL = 3\nDATE_COL = 4\nTEXT_COL = 5\nUSEFUL_COL = 6\nFUNNY_COL = 7\nCOOL_COL = 8\npattern = re.compile('\\\\W')\nwith open('yelp_review.csv', encoding='utf8') as csvfile:\n wordFrequencies = defaultdict(int)\n\n def beautifyDate(res):\n dt = time.strptime(res, '%Y-%m-%d')\n return calendar.timegm(dt)\n\n def getAsciiFriendlyString(text, wordFrequencies):\n \"\"\"\n\t\tThings to note about the code: this code include punctuation and immediately adds non ASCII\n\t\tfriendly into the <unk> pile\n\t\t\"\"\"\n strings = text.lower()\n strings = strings.split(' ')\n for wrd in strings:\n try:\n wrd = re.sub(pattern, '', wrd)\n wrd.encode('ascii')\n wordFrequencies[wrd] += 1\n except UnicodeEncodeError:\n wordFrequencies['<unk>'] += 1\n toyTrain = open('100k_numDate_train.csv', 'w')\n toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=\n csv.QUOTE_MINIMAL, lineterminator='\\n')\n print('Creating List....')\n readCSV = list(csv.reader(csvfile, delimiter=','))\n print('Finished creating list....')\n print('Number of examples:', len(readCSV))\n excludeSet = {REVIEW_ID_COL}\n fieldNames = readCSV[0]\n print(fieldNames)\n readForOneHot = readCSV[1:]\n print('Going through the words for the frequencies.')\n for row in readForOneHot:\n getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)\n print(len(readForOneHot))\n print('creating file with word frequencies')\n wrdFrq = open('yelp_word_frequencies.csv', 'w')\n wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting\n =csv.QUOTE_MINIMAL)\n wrdFrqWriter.writerow(['word', 'frequency'])\n for wrd in wordFrequencies:\n wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])\n", "step-5": "\"\"\"\r\nThis file goes through the data to find the frequencies of words in the corpus\r\n\"\"\"\r\n\r\nimport csv\r\nimport time, datetime\r\nimport calendar\r\nfrom collections import defaultdict\r\nimport chardet\r\nimport re\r\n\r\nREVIEW_ID_COL = 0;\r\nUSER_ID_COL = 1\r\nBUSINESS_ID_COL = 2\r\nSTARS_COL = 3\r\nDATE_COL = 4\r\nTEXT_COL = 5\r\nUSEFUL_COL = 6\r\nFUNNY_COL = 7\r\nCOOL_COL = 8\r\n\r\npattern = re.compile('\\W')\r\n\r\nwith open(\"yelp_review.csv\", encoding=\"utf8\") as csvfile:\r\n\twordFrequencies = defaultdict(int)\r\n\tdef beautifyDate(res): \r\n\t\t# This function returns a floating point that gives the UTC\r\n\t\t# print (res)\r\n\t\tdt = time.strptime(res, '%Y-%m-%d')\r\n\t\treturn calendar.timegm(dt)\r\n\r\n\tdef getAsciiFriendlyString(text, wordFrequencies):\r\n\t\t\"\"\"\r\n\t\tThings to note about the code: this code include punctuation and immediately adds non ASCII\r\n\t\tfriendly into the <unk> pile\r\n\t\t\"\"\"\r\n\t\tstrings = text.lower()\r\n\t\tstrings = strings.split(\" \")\r\n\t\tfor wrd in strings:\r\n\t\t\ttry:\r\n\t\t\t\twrd = re.sub(pattern, '', wrd)\r\n\t\t\t\t#print (wrd)\r\n\t\t\t\twrd.encode('ascii')\r\n\t\t\t\twordFrequencies[wrd] += 1\r\n\t\t\texcept UnicodeEncodeError:\r\n\t\t\t\t#print (\":( \", wrd)\r\n\t\t\t\twordFrequencies[\"<unk>\"] += 1\r\n\r\n\t#getAsciiFriendlyString(\"mooing!@ cows are the best\", wordFrequencies)\r\n\t#print (len(wordFrequencies))\r\n\t#for wrd in wordFrequencies:\r\n\t\t#print (wrd, wordFrequencies[wrd])\r\n\t\t#wrdFrqWriter.writerow([wrd])\r\n\ttoyTrain = open(\"100k_numDate_train.csv\", 'w')\r\n\ttoyWriter = csv.writer(toyTrain, delimiter=',',\r\n\t\tquotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator = '\\n')\r\n\tprint (\"Creating 
List....\")\r\n\treadCSV = list(csv.reader(csvfile, delimiter=','))\r\n\tprint (\"Finished creating list....\")\r\n\tprint (\"Number of examples:\", len(readCSV))\r\n\texcludeSet = {REVIEW_ID_COL};\r\n\r\n\tfieldNames = readCSV[0]\r\n\tprint(fieldNames)\r\n\r\n\treadForOneHot = readCSV[1:]\r\n\r\n\tprint (\"Going through the words for the frequencies.\")\r\n\t# Go through the set, finding the frequencies\r\n\tfor row in readForOneHot:\r\n\t\tgetAsciiFriendlyString(row[TEXT_COL], wordFrequencies)\r\n\r\n\tprint (len(readForOneHot))\r\n\t# Write the frequencies to a file (so we don't have to do this again.....)\r\n\tprint (\"creating file with word frequencies\")\r\n\r\n\twrdFrq = open(\"yelp_word_frequencies.csv\", 'w')\r\n\twrdFrqWriter = csv.writer(wrdFrq, delimiter=',',\r\n\t\tquotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n\twrdFrqWriter.writerow([\"word\", \"frequency\"])\r\n\tfor wrd in wordFrequencies:\r\n\t\twrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])\r\n\r\n\r\n\r\n\r\n\t\t\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
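The tokenizer in this record is easy to misread, so here is a self-contained sketch of the same logic: lowercase, split on single spaces, strip non-word characters, and collapse anything that fails ASCII encoding into an <unk> bucket.

import re
from collections import defaultdict

pattern = re.compile(r'\W')

def count_words(text, freqs):
    # mirrors getAsciiFriendlyString from step-5
    for wrd in text.lower().split(' '):
        try:
            wrd = re.sub(pattern, '', wrd)  # 'best,' -> 'best'
            wrd.encode('ascii')             # raises UnicodeEncodeError on non-ASCII words
            freqs[wrd] += 1
        except UnicodeEncodeError:
            freqs['<unk>'] += 1             # non-ASCII words are not counted individually

freqs = defaultdict(int)
count_words('mooing!@ cows are the best, даже коровы', freqs)
print(dict(freqs))  # {'mooing': 1, 'cows': 1, 'are': 1, 'the': 1, 'best': 1, '<unk>': 2}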
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> brick.sound.beep() wait(1000) motor_a.run_target(500, 720) wait(1000) brick.sound.beep(1000, 500) <|reserved_special_token_1|> <|reserved_special_token_0|> motor_a = Motor(Port.A) brick.sound.beep() wait(1000) motor_a.run_target(500, 720) wait(1000) brick.sound.beep(1000, 500) <|reserved_special_token_1|> from pybricks import ev3brick as brick from pybricks.ev3devices import Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor from pybricks.parameters import Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align from pybricks.tools import print, wait, StopWatch from pybricks.robotics import DriveBase motor_a = Motor(Port.A) brick.sound.beep() wait(1000) motor_a.run_target(500, 720) wait(1000) brick.sound.beep(1000, 500) <|reserved_special_token_1|> #!/usr/bin/env pybricks-micropython from pybricks import ev3brick as brick from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor) from pybricks.parameters import (Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align) from pybricks.tools import print, wait, StopWatch from pybricks.robotics import DriveBase # Write your program here motor_a = Motor(Port.A) brick.sound.beep() wait(1000) motor_a.run_target(500, 720) #500 degrees per second, 90 target angle wait(1000) brick.sound.beep(1000, 500) #frequency, duration
flexible
{ "blob_id": "f6ebc3c37a69e5ec49d91609db394eec4a94cedf", "index": 9982, "step-1": "<mask token>\n", "step-2": "<mask token>\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720)\nwait(1000)\nbrick.sound.beep(1000, 500)\n", "step-3": "<mask token>\nmotor_a = Motor(Port.A)\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720)\nwait(1000)\nbrick.sound.beep(1000, 500)\n", "step-4": "from pybricks import ev3brick as brick\nfrom pybricks.ev3devices import Motor, TouchSensor, ColorSensor, InfraredSensor, UltrasonicSensor, GyroSensor\nfrom pybricks.parameters import Port, Stop, Direction, Button, Color, SoundFile, ImageFile, Align\nfrom pybricks.tools import print, wait, StopWatch\nfrom pybricks.robotics import DriveBase\nmotor_a = Motor(Port.A)\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720)\nwait(1000)\nbrick.sound.beep(1000, 500)\n", "step-5": "#!/usr/bin/env pybricks-micropython\n\nfrom pybricks import ev3brick as brick\nfrom pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,\n InfraredSensor, UltrasonicSensor, GyroSensor)\nfrom pybricks.parameters import (Port, Stop, Direction, Button, Color,\n SoundFile, ImageFile, Align)\nfrom pybricks.tools import print, wait, StopWatch\nfrom pybricks.robotics import DriveBase\n\n# Write your program here\nmotor_a = Motor(Port.A)\n\nbrick.sound.beep()\nwait(1000)\nmotor_a.run_target(500, 720) #500 degrees per second, 90 target angle\nwait(1000)\nbrick.sound.beep(1000, 500) #frequency, duration\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
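One reading note on the record above: step-5's inline comment says "90 target angle" while the call passes 720, so the comment and the code disagree. The call motor_a.run_target(500, 720) means: rotate at 500 deg/s until the shaft reaches the 720-degree position. A toy, hardware-free model of the timing, just to make the two positional arguments concrete:

def run_target(speed_deg_per_s, target_angle_deg, current_angle_deg=0.0):
    # toy model: time to sweep from the current angle to the target at constant speed
    return abs(target_angle_deg - current_angle_deg) / speed_deg_per_s

print(run_target(500, 720))  # 1.44 seconds to sweep 720 degrees at 500 deg/s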
# drop data to file filter
import tarr.compiler_base


def format_data(data):
    return '{0.id}: {0.payload}'.format(data)


class WRITE_TO_FILE(tarr.compiler_base.Instruction):

    @property
    def __name__(self):
        return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)

    def __init__(self, filename, formatter=format_data):
        self.format = formatter
        self.filename = filename

    def run(self, runner, data):
        # NOTE: we need to do writing in UNBUFFERED mode (buffering=0)
        # as potentially there are other processes writing to the same file
        # *NOW*; on Python 3, buffering=0 requires binary mode, so the
        # formatted line must be encoded before writing
        with open(self.filename, 'ab', buffering=0) as f:
            f.write((self.format(data) + '\n').encode('utf-8'))
        return data

    def clone(self):
        return self.__class__(filename=self.filename, formatter=self.format)
normal
{ "blob_id": "75393d39b147097a7ac1d82938ac102491ea9441", "index": 8469, "step-1": "<mask token>\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n <mask token>\n", "step-2": "<mask token>\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-3": "<mask token>\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-4": "import tarr.compiler_base\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-5": "# drop data to file filter\nimport tarr.compiler_base\n\n\ndef format_data(data):\n return '{0.id}: {0.payload}'.format(data)\n\n\nclass WRITE_TO_FILE(tarr.compiler_base.Instruction):\n\n @property\n def __name__(self):\n return 'POINT OF INTEREST - WRITE(\"{}\")'.format(self.filename)\n\n def __init__(self, filename, formatter=format_data):\n self.format = formatter\n self.filename = filename\n\n def run(self, runner, data):\n # NOTE: we need to do writing in UNBUFFERED mode (buffering=0)\n # as potentially there are other processes writing to the same file\n # *NOW*\n with open(self.filename, 'ab', buffering=0) as f:\n f.write(self.format(data) + '\\n')\n return data\n\n def clone(self):\n return self.__class__(filename=self.filename, formatter=self.format)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
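A hypothetical usage sketch for the WRITE_TO_FILE instruction above, assuming tarr is installed and the class is in scope; the Data namedtuple and the log path are stand-ins (anything with .id and .payload fits format_data):

from collections import namedtuple

Data = namedtuple('Data', ['id', 'payload'])

writer = WRITE_TO_FILE('/tmp/poi.log')          # default formatter yields '{id}: {payload}'
writer.run(runner=None, data=Data(7, 'hello'))  # appends '7: hello'; runner is unused here
clone = writer.clone()                          # independent instruction, same file and formatter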
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def draw_chat(id, smooth_id, main_mode, my_name, chat_day_data, main_plot, pie_plot, list_chats_plot): min_in_day = 1440 possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60] possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60] possible_smooth = [10, 15, 20, 30, 40, 45, 60] count_of_chats = len(chat_day_data) id = (id + count_of_chats) % count_of_chats smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth) smooth = possible_smooth[smooth_id] sum_score = chat_day_data[id][2] calendar = chat_day_data[id][3] companion_name = chat_day_data[id][0] def draw_main_plot_as_all(): first_day = 0 def gen_data(): nonlocal first_day calendar_dates = list(calendar.keys()) ind = [0] now = min(calendar_dates) first_day = now last = max(calendar_dates) duration = (last - now).days + 1 need_space_btw_labels = duration // 25 labels = [now] last_label = 0 t = 0 vals = [0] * duration vals[0] = calendar[now] while now != last: now += datetime.timedelta(days=1) t += 1 if now in calendar_dates: ind.append(t) vals[t] = calendar[now] if t - last_label >= need_space_btw_labels: last_label = t labels.append(str(now)) else: labels.append('') def make_smoothie(a, shift): n = len(a) res = [0] * n koef = [] for i in range(shift + 1): koef.append(max(0, math.cos(i / (shift + 1)) ** 2 * 2 - 1)) for i in range(n): sum = 0 sum_k = 0 for j in range(-shift, shift + 1): if 0 <= i + j < n: k = koef[abs(j)] sum += a[i + j] * k sum_k += k res[i] = sum / sum_k return res s = int((duration / 50) ** 0.5) print(duration, s) vals = make_smoothie(vals, s) return ind, labels, vals width = 1 plot = main_plot plot.clear() ind, labels, vals = gen_data() plot.set_xticks(ind) plot.set_xticklabels(labels) plot.xaxis.set_tick_params(rotation=90) plot.bar(range(len(vals)), vals, width) def format_coord(x, y): day = int(x + 0.5) day = first_day + datetime.timedelta(days=day) val = 0 if day in calendar: val = calendar[day] if val > 512: val = str(val // 1024) + '.' 
+ str(int(val % 1024 / 102.4 + 0.5)) val += 'Kb' return str(day) + ' ' + str(val) return str(day) plot.format_coord = format_coord def draw_main_plot_as_day(): N = min_in_day // smooth def set_smooth(score, smooth): res = [0] * N for i in range(min_in_day): res[i // smooth] += score[i] return res me_score = set_smooth(sum_score[0], smooth) he_score = set_smooth(sum_score[1], smooth) ind = np.arange(N) width = 1 def gen_time_labels(): k = int(N / 24 + 0.5) def time(t): return str(t // 60) + ':' + str(t // 10 % 6) + str(t % 10) labels = [(time(x * smooth) if x % k == 0 else '') for x in range(N)] return labels width = 0.8 plot = main_plot plot.clear() plot.set_xticks(ind) plot.set_xticklabels(gen_time_labels()) plot.xaxis.set_tick_params(rotation=90) p1 = plot.bar(ind, me_score, width) p2 = plot.bar(ind, he_score, width, bottom=me_score) plot.legend((p1[0], p2[0]), (my_name, companion_name)) def format_coord(x, y): x = int(x + 0.5) if 0 <= x < len(me_score) and me_score[x] + he_score[x]: rate = me_score[x] / (me_score[x] + he_score[x]) return f'rate: {rate * 100:.2f}%' return None plot.format_coord = format_coord def draw_main_plot(mode): if mode == 0: draw_main_plot_as_day() else: draw_main_plot_as_all() def draw_pie(): sizes = chat_day_data[id][1] explode = [0, 0, 0.1] pie_plot.clear() def get_angle(): return -90 + 360 * (sizes[2] / (2 * sum(sizes))) pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%', shadow=True, startangle=get_angle()) pie_plot.format_coord = lambda x, y: None def draw_list_chats(id): chats_above = 4 chats_bottom = 5 if count_of_chats < chats_above + 1 + chats_bottom: chats_above = id chats_bottom = count_of_chats - id - 1 if id < chats_above: chats_bottom += chats_above - id chats_above = id if id + chats_bottom >= count_of_chats: chats_bottom = count_of_chats - id - 1 plot = list_chats_plot N = chats_above + 1 + chats_bottom people = [] scores = [] for i in range(-chats_above, chats_bottom + 1): people.append(chat_day_data[i + id][0]) scores.append(sum(chat_day_data[i + id][1])) selected_chat = [0] * N selected_chat[chats_above] = scores[chats_above] plot.clear() plot.set_yticks(range(N)) plot.set_yticklabels(people) plot.invert_yaxis() plot.yaxis.tick_right() plot.invert_xaxis() plot.axes.get_xaxis().set_visible(False) bars = plot.barh(range(N), scores) plot.barh(range(N), selected_chat) plot.format_coord = lambda x, y: None for bar in bars: continue height = bar.get_y() + bar.get_height() / 2 width = bar.get_x() + bar.get_width() plot.annotate(f' {str(width)[:]}', xy=(width, height), ha= 'left', va='center') draw_main_plot(main_mode) draw_pie() draw_list_chats(id) plt.draw() <|reserved_special_token_1|> import math import datetime import numpy as np import matplotlib.pyplot as plt def draw_chat(id, smooth_id, main_mode, my_name, chat_day_data, main_plot, pie_plot, list_chats_plot): min_in_day = 1440 possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60] possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60] possible_smooth = [10, 15, 20, 30, 40, 45, 60] count_of_chats = len(chat_day_data) id = (id + count_of_chats) % count_of_chats smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth) smooth = possible_smooth[smooth_id] sum_score = chat_day_data[id][2] calendar = chat_day_data[id][3] companion_name = chat_day_data[id][0] def draw_main_plot_as_all(): first_day = 0 def gen_data(): nonlocal first_day calendar_dates = list(calendar.keys()) ind = [0] now = 
min(calendar_dates) first_day = now last = max(calendar_dates) duration = (last - now).days + 1 need_space_btw_labels = duration // 25 labels = [now] last_label = 0 t = 0 vals = [0] * duration vals[0] = calendar[now] while now != last: now += datetime.timedelta(days=1) t += 1 if now in calendar_dates: ind.append(t) vals[t] = calendar[now] if t - last_label >= need_space_btw_labels: last_label = t labels.append(str(now)) else: labels.append('') def make_smoothie(a, shift): n = len(a) res = [0] * n koef = [] for i in range(shift + 1): koef.append(max(0, math.cos(i / (shift + 1)) ** 2 * 2 - 1)) for i in range(n): sum = 0 sum_k = 0 for j in range(-shift, shift + 1): if 0 <= i + j < n: k = koef[abs(j)] sum += a[i + j] * k sum_k += k res[i] = sum / sum_k return res s = int((duration / 50) ** 0.5) print(duration, s) vals = make_smoothie(vals, s) return ind, labels, vals width = 1 plot = main_plot plot.clear() ind, labels, vals = gen_data() plot.set_xticks(ind) plot.set_xticklabels(labels) plot.xaxis.set_tick_params(rotation=90) plot.bar(range(len(vals)), vals, width) def format_coord(x, y): day = int(x + 0.5) day = first_day + datetime.timedelta(days=day) val = 0 if day in calendar: val = calendar[day] if val > 512: val = str(val // 1024) + '.' + str(int(val % 1024 / 102.4 + 0.5)) val += 'Kb' return str(day) + ' ' + str(val) return str(day) plot.format_coord = format_coord def draw_main_plot_as_day(): N = min_in_day // smooth def set_smooth(score, smooth): res = [0] * N for i in range(min_in_day): res[i // smooth] += score[i] return res me_score = set_smooth(sum_score[0], smooth) he_score = set_smooth(sum_score[1], smooth) ind = np.arange(N) width = 1 def gen_time_labels(): k = int(N / 24 + 0.5) def time(t): return str(t // 60) + ':' + str(t // 10 % 6) + str(t % 10) labels = [(time(x * smooth) if x % k == 0 else '') for x in range(N)] return labels width = 0.8 plot = main_plot plot.clear() plot.set_xticks(ind) plot.set_xticklabels(gen_time_labels()) plot.xaxis.set_tick_params(rotation=90) p1 = plot.bar(ind, me_score, width) p2 = plot.bar(ind, he_score, width, bottom=me_score) plot.legend((p1[0], p2[0]), (my_name, companion_name)) def format_coord(x, y): x = int(x + 0.5) if 0 <= x < len(me_score) and me_score[x] + he_score[x]: rate = me_score[x] / (me_score[x] + he_score[x]) return f'rate: {rate * 100:.2f}%' return None plot.format_coord = format_coord def draw_main_plot(mode): if mode == 0: draw_main_plot_as_day() else: draw_main_plot_as_all() def draw_pie(): sizes = chat_day_data[id][1] explode = [0, 0, 0.1] pie_plot.clear() def get_angle(): return -90 + 360 * (sizes[2] / (2 * sum(sizes))) pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%', shadow=True, startangle=get_angle()) pie_plot.format_coord = lambda x, y: None def draw_list_chats(id): chats_above = 4 chats_bottom = 5 if count_of_chats < chats_above + 1 + chats_bottom: chats_above = id chats_bottom = count_of_chats - id - 1 if id < chats_above: chats_bottom += chats_above - id chats_above = id if id + chats_bottom >= count_of_chats: chats_bottom = count_of_chats - id - 1 plot = list_chats_plot N = chats_above + 1 + chats_bottom people = [] scores = [] for i in range(-chats_above, chats_bottom + 1): people.append(chat_day_data[i + id][0]) scores.append(sum(chat_day_data[i + id][1])) selected_chat = [0] * N selected_chat[chats_above] = scores[chats_above] plot.clear() plot.set_yticks(range(N)) plot.set_yticklabels(people) plot.invert_yaxis() plot.yaxis.tick_right() plot.invert_xaxis() 
plot.axes.get_xaxis().set_visible(False) bars = plot.barh(range(N), scores) plot.barh(range(N), selected_chat) plot.format_coord = lambda x, y: None for bar in bars: continue height = bar.get_y() + bar.get_height() / 2 width = bar.get_x() + bar.get_width() plot.annotate(f' {str(width)[:]}', xy=(width, height), ha= 'left', va='center') draw_main_plot(main_mode) draw_pie() draw_list_chats(id) plt.draw() <|reserved_special_token_1|> import math import datetime import numpy as np import matplotlib.pyplot as plt def draw_chat( id, smooth_id, main_mode, my_name, chat_day_data, main_plot, pie_plot, list_chats_plot): min_in_day = 1440 possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60] possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60] possible_smooth = [10, 15, 20, 30, 40, 45, 60] #divisors of 1440 (minutes in day) count_of_chats = len(chat_day_data) id = (id + count_of_chats) % count_of_chats smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth) smooth = possible_smooth[smooth_id] sum_score = chat_day_data[id][2] calendar = chat_day_data[id][3] companion_name = chat_day_data[id][0] def draw_main_plot_as_all(): first_day = 0 def gen_data(): nonlocal first_day calendar_dates = list(calendar.keys()) ind = [0] now = min(calendar_dates) first_day = now last = max(calendar_dates) duration = (last - now).days + 1 need_space_btw_labels = duration // 25 labels = [now] last_label = 0 t = 0 vals = [0] * duration vals[0] = calendar[now] while now != last: now += datetime.timedelta(days=1) t += 1 if now in calendar_dates: ind.append(t) vals[t] = calendar[now] if t-last_label >= need_space_btw_labels: last_label = t labels.append(str(now)) else: labels.append("") def make_smoothie(a, shift): n = len(a) res = [0] * n koef = [] for i in range(shift+1): koef.append( max(0, math.cos(i/(shift+1))**2*2 - 1) ) for i in range(n): sum = 0 sum_k = 0 for j in range(-shift, shift+1): if 0 <= i+j < n: k = koef[abs(j)] sum += a[i+j] * k sum_k += k res[i] = sum / sum_k return res s = int((duration/50)**0.5) #random.randint(0,10) print(duration, s) vals = make_smoothie(vals, s) return ind,labels,vals width = 1 # default value plot = main_plot plot.clear() ind, labels, vals = gen_data() plot.set_xticks(ind) plot.set_xticklabels(labels) plot.xaxis.set_tick_params(rotation=90) #plot.bar(ind, vals, width) plot.bar(range(len(vals)), vals, width) def format_coord(x, y): day = int(x + 0.5) day = first_day + datetime.timedelta(days=day) #print(day,y) val = 0 if day in calendar: val = calendar[day] if val > 512: val = str(val // 1024) + "." 
+ str(int((val % 1024 / 102.4 + 0.5))) val += "Kb" return str(day) + " " + str(val) return str(day) plot.format_coord = format_coord #plot.set_yscale('log') def draw_main_plot_as_day(): N = min_in_day // smooth def set_smooth(score, smooth): res = [0] * N for i in range(min_in_day): res[i//smooth] += score[i] #res[i] = sum(score[i*smooth:(i+1)*smooth]) return res me_score = set_smooth(sum_score[0], smooth) he_score = set_smooth(sum_score[1], smooth) ind = np.arange(N) width = 1 def gen_time_labels(): # Set step between labels for they count of be near the 24 k = int(N / 24 + 0.5) def time(t): # get time in format `h:mm` from `t` as minute return str(t//60) + ":" + str(t//10%6)+str(t%10) labels = [time(x*smooth) if x % k == 0 else "" for x in range(N)] return labels width = 0.8 # default value plot = main_plot plot.clear() plot.set_xticks(ind) plot.set_xticklabels(gen_time_labels()) plot.xaxis.set_tick_params(rotation=90) p1 = plot.bar(ind, me_score, width) p2 = plot.bar(ind, he_score, width, bottom=me_score) plot.legend((p1[0], p2[0]), (my_name, companion_name)) def format_coord(x,y): x = int(x+0.5) if 0 <= x < len(me_score) and me_score[x] + he_score[x]: rate = me_score[x] / (me_score[x] + he_score[x]) return f"rate: {rate*100:.2f}%" return None plot.format_coord = format_coord def draw_main_plot(mode): if mode == 0: draw_main_plot_as_day() else: draw_main_plot_as_all() def draw_pie(): sizes = chat_day_data[id][1] explode = [0, 0, 0.1] pie_plot.clear() def get_angle(): # Set green part (forwarded message) in central bottom part return -90 + 360*(sizes[2]/(2*sum(sizes))) pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%', shadow=True, startangle=get_angle()) pie_plot.format_coord = lambda x,y: None def draw_list_chats(id): chats_above = 4 chats_bottom = 5 if count_of_chats < chats_above + 1 + chats_bottom: chats_above = id chats_bottom = count_of_chats - id - 1 if id < chats_above: chats_bottom += chats_above - id chats_above = id if id + chats_bottom >= count_of_chats: chats_bottom = count_of_chats - id - 1 plot = list_chats_plot N = chats_above + 1 + chats_bottom people = [] scores = [] for i in range(-chats_above, chats_bottom+1): people.append(chat_day_data[i+id][0]) scores.append(sum(chat_day_data[i+id][1])) selected_chat = [0] * N selected_chat[chats_above] = scores[chats_above] plot.clear() plot.set_yticks(range(N)) plot.set_yticklabels(people) plot.invert_yaxis() plot.yaxis.tick_right() plot.invert_xaxis() plot.axes.get_xaxis().set_visible(False) #plot.axes.get_yaxis().set_ticks([]) bars = plot.barh(range(N), scores) plot.barh(range(N), selected_chat) plot.format_coord = lambda x,y: None for bar in bars: continue height = bar.get_y() + bar.get_height() / 2 width = bar.get_x() + bar.get_width() plot.annotate(f' {str(width)[:]}', xy=(width, height), ha='left', va='center') draw_main_plot(main_mode) draw_pie() draw_list_chats(id) plt.draw()
flexible
{ "blob_id": "b297a09ee19bb8069eb65eb085903b3219c6fe5a", "index": 7971, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef draw_chat(id, smooth_id, main_mode, my_name, chat_day_data, main_plot,\n pie_plot, list_chats_plot):\n min_in_day = 1440\n possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, \n 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 15, 20, 30, 40, 45, 60]\n count_of_chats = len(chat_day_data)\n id = (id + count_of_chats) % count_of_chats\n smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)\n smooth = possible_smooth[smooth_id]\n sum_score = chat_day_data[id][2]\n calendar = chat_day_data[id][3]\n companion_name = chat_day_data[id][0]\n\n def draw_main_plot_as_all():\n first_day = 0\n\n def gen_data():\n nonlocal first_day\n calendar_dates = list(calendar.keys())\n ind = [0]\n now = min(calendar_dates)\n first_day = now\n last = max(calendar_dates)\n duration = (last - now).days + 1\n need_space_btw_labels = duration // 25\n labels = [now]\n last_label = 0\n t = 0\n vals = [0] * duration\n vals[0] = calendar[now]\n while now != last:\n now += datetime.timedelta(days=1)\n t += 1\n if now in calendar_dates:\n ind.append(t)\n vals[t] = calendar[now]\n if t - last_label >= need_space_btw_labels:\n last_label = t\n labels.append(str(now))\n else:\n labels.append('')\n\n def make_smoothie(a, shift):\n n = len(a)\n res = [0] * n\n koef = []\n for i in range(shift + 1):\n koef.append(max(0, math.cos(i / (shift + 1)) ** 2 * 2 - 1))\n for i in range(n):\n sum = 0\n sum_k = 0\n for j in range(-shift, shift + 1):\n if 0 <= i + j < n:\n k = koef[abs(j)]\n sum += a[i + j] * k\n sum_k += k\n res[i] = sum / sum_k\n return res\n s = int((duration / 50) ** 0.5)\n print(duration, s)\n vals = make_smoothie(vals, s)\n return ind, labels, vals\n width = 1\n plot = main_plot\n plot.clear()\n ind, labels, vals = gen_data()\n plot.set_xticks(ind)\n plot.set_xticklabels(labels)\n plot.xaxis.set_tick_params(rotation=90)\n plot.bar(range(len(vals)), vals, width)\n\n def format_coord(x, y):\n day = int(x + 0.5)\n day = first_day + datetime.timedelta(days=day)\n val = 0\n if day in calendar:\n val = calendar[day]\n if val > 512:\n val = str(val // 1024) + '.' 
+ str(int(val % 1024 / \n 102.4 + 0.5))\n val += 'Kb'\n return str(day) + ' ' + str(val)\n return str(day)\n plot.format_coord = format_coord\n\n def draw_main_plot_as_day():\n N = min_in_day // smooth\n\n def set_smooth(score, smooth):\n res = [0] * N\n for i in range(min_in_day):\n res[i // smooth] += score[i]\n return res\n me_score = set_smooth(sum_score[0], smooth)\n he_score = set_smooth(sum_score[1], smooth)\n ind = np.arange(N)\n width = 1\n\n def gen_time_labels():\n k = int(N / 24 + 0.5)\n\n def time(t):\n return str(t // 60) + ':' + str(t // 10 % 6) + str(t % 10)\n labels = [(time(x * smooth) if x % k == 0 else '') for x in\n range(N)]\n return labels\n width = 0.8\n plot = main_plot\n plot.clear()\n plot.set_xticks(ind)\n plot.set_xticklabels(gen_time_labels())\n plot.xaxis.set_tick_params(rotation=90)\n p1 = plot.bar(ind, me_score, width)\n p2 = plot.bar(ind, he_score, width, bottom=me_score)\n plot.legend((p1[0], p2[0]), (my_name, companion_name))\n\n def format_coord(x, y):\n x = int(x + 0.5)\n if 0 <= x < len(me_score) and me_score[x] + he_score[x]:\n rate = me_score[x] / (me_score[x] + he_score[x])\n return f'rate: {rate * 100:.2f}%'\n return None\n plot.format_coord = format_coord\n\n def draw_main_plot(mode):\n if mode == 0:\n draw_main_plot_as_day()\n else:\n draw_main_plot_as_all()\n\n def draw_pie():\n sizes = chat_day_data[id][1]\n explode = [0, 0, 0.1]\n pie_plot.clear()\n\n def get_angle():\n return -90 + 360 * (sizes[2] / (2 * sum(sizes)))\n pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode,\n autopct='%1.1f%%', shadow=True, startangle=get_angle())\n pie_plot.format_coord = lambda x, y: None\n\n def draw_list_chats(id):\n chats_above = 4\n chats_bottom = 5\n if count_of_chats < chats_above + 1 + chats_bottom:\n chats_above = id\n chats_bottom = count_of_chats - id - 1\n if id < chats_above:\n chats_bottom += chats_above - id\n chats_above = id\n if id + chats_bottom >= count_of_chats:\n chats_bottom = count_of_chats - id - 1\n plot = list_chats_plot\n N = chats_above + 1 + chats_bottom\n people = []\n scores = []\n for i in range(-chats_above, chats_bottom + 1):\n people.append(chat_day_data[i + id][0])\n scores.append(sum(chat_day_data[i + id][1]))\n selected_chat = [0] * N\n selected_chat[chats_above] = scores[chats_above]\n plot.clear()\n plot.set_yticks(range(N))\n plot.set_yticklabels(people)\n plot.invert_yaxis()\n plot.yaxis.tick_right()\n plot.invert_xaxis()\n plot.axes.get_xaxis().set_visible(False)\n bars = plot.barh(range(N), scores)\n plot.barh(range(N), selected_chat)\n plot.format_coord = lambda x, y: None\n for bar in bars:\n continue\n height = bar.get_y() + bar.get_height() / 2\n width = bar.get_x() + bar.get_width()\n plot.annotate(f' {str(width)[:]}', xy=(width, height), ha=\n 'left', va='center')\n draw_main_plot(main_mode)\n draw_pie()\n draw_list_chats(id)\n plt.draw()\n", "step-3": "import math\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef draw_chat(id, smooth_id, main_mode, my_name, chat_day_data, main_plot,\n pie_plot, list_chats_plot):\n min_in_day = 1440\n possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, \n 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 15, 20, 30, 40, 45, 60]\n count_of_chats = len(chat_day_data)\n id = (id + count_of_chats) % count_of_chats\n smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)\n smooth = possible_smooth[smooth_id]\n sum_score = 
chat_day_data[id][2]\n calendar = chat_day_data[id][3]\n companion_name = chat_day_data[id][0]\n\n def draw_main_plot_as_all():\n first_day = 0\n\n def gen_data():\n nonlocal first_day\n calendar_dates = list(calendar.keys())\n ind = [0]\n now = min(calendar_dates)\n first_day = now\n last = max(calendar_dates)\n duration = (last - now).days + 1\n need_space_btw_labels = duration // 25\n labels = [now]\n last_label = 0\n t = 0\n vals = [0] * duration\n vals[0] = calendar[now]\n while now != last:\n now += datetime.timedelta(days=1)\n t += 1\n if now in calendar_dates:\n ind.append(t)\n vals[t] = calendar[now]\n if t - last_label >= need_space_btw_labels:\n last_label = t\n labels.append(str(now))\n else:\n labels.append('')\n\n def make_smoothie(a, shift):\n n = len(a)\n res = [0] * n\n koef = []\n for i in range(shift + 1):\n koef.append(max(0, math.cos(i / (shift + 1)) ** 2 * 2 - 1))\n for i in range(n):\n sum = 0\n sum_k = 0\n for j in range(-shift, shift + 1):\n if 0 <= i + j < n:\n k = koef[abs(j)]\n sum += a[i + j] * k\n sum_k += k\n res[i] = sum / sum_k\n return res\n s = int((duration / 50) ** 0.5)\n print(duration, s)\n vals = make_smoothie(vals, s)\n return ind, labels, vals\n width = 1\n plot = main_plot\n plot.clear()\n ind, labels, vals = gen_data()\n plot.set_xticks(ind)\n plot.set_xticklabels(labels)\n plot.xaxis.set_tick_params(rotation=90)\n plot.bar(range(len(vals)), vals, width)\n\n def format_coord(x, y):\n day = int(x + 0.5)\n day = first_day + datetime.timedelta(days=day)\n val = 0\n if day in calendar:\n val = calendar[day]\n if val > 512:\n val = str(val // 1024) + '.' + str(int(val % 1024 / \n 102.4 + 0.5))\n val += 'Kb'\n return str(day) + ' ' + str(val)\n return str(day)\n plot.format_coord = format_coord\n\n def draw_main_plot_as_day():\n N = min_in_day // smooth\n\n def set_smooth(score, smooth):\n res = [0] * N\n for i in range(min_in_day):\n res[i // smooth] += score[i]\n return res\n me_score = set_smooth(sum_score[0], smooth)\n he_score = set_smooth(sum_score[1], smooth)\n ind = np.arange(N)\n width = 1\n\n def gen_time_labels():\n k = int(N / 24 + 0.5)\n\n def time(t):\n return str(t // 60) + ':' + str(t // 10 % 6) + str(t % 10)\n labels = [(time(x * smooth) if x % k == 0 else '') for x in\n range(N)]\n return labels\n width = 0.8\n plot = main_plot\n plot.clear()\n plot.set_xticks(ind)\n plot.set_xticklabels(gen_time_labels())\n plot.xaxis.set_tick_params(rotation=90)\n p1 = plot.bar(ind, me_score, width)\n p2 = plot.bar(ind, he_score, width, bottom=me_score)\n plot.legend((p1[0], p2[0]), (my_name, companion_name))\n\n def format_coord(x, y):\n x = int(x + 0.5)\n if 0 <= x < len(me_score) and me_score[x] + he_score[x]:\n rate = me_score[x] / (me_score[x] + he_score[x])\n return f'rate: {rate * 100:.2f}%'\n return None\n plot.format_coord = format_coord\n\n def draw_main_plot(mode):\n if mode == 0:\n draw_main_plot_as_day()\n else:\n draw_main_plot_as_all()\n\n def draw_pie():\n sizes = chat_day_data[id][1]\n explode = [0, 0, 0.1]\n pie_plot.clear()\n\n def get_angle():\n return -90 + 360 * (sizes[2] / (2 * sum(sizes)))\n pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode,\n autopct='%1.1f%%', shadow=True, startangle=get_angle())\n pie_plot.format_coord = lambda x, y: None\n\n def draw_list_chats(id):\n chats_above = 4\n chats_bottom = 5\n if count_of_chats < chats_above + 1 + chats_bottom:\n chats_above = id\n chats_bottom = count_of_chats - id - 1\n if id < chats_above:\n chats_bottom += chats_above - id\n chats_above = id\n if id + 
chats_bottom >= count_of_chats:\n chats_bottom = count_of_chats - id - 1\n plot = list_chats_plot\n N = chats_above + 1 + chats_bottom\n people = []\n scores = []\n for i in range(-chats_above, chats_bottom + 1):\n people.append(chat_day_data[i + id][0])\n scores.append(sum(chat_day_data[i + id][1]))\n selected_chat = [0] * N\n selected_chat[chats_above] = scores[chats_above]\n plot.clear()\n plot.set_yticks(range(N))\n plot.set_yticklabels(people)\n plot.invert_yaxis()\n plot.yaxis.tick_right()\n plot.invert_xaxis()\n plot.axes.get_xaxis().set_visible(False)\n bars = plot.barh(range(N), scores)\n plot.barh(range(N), selected_chat)\n plot.format_coord = lambda x, y: None\n for bar in bars:\n continue\n height = bar.get_y() + bar.get_height() / 2\n width = bar.get_x() + bar.get_width()\n plot.annotate(f' {str(width)[:]}', xy=(width, height), ha=\n 'left', va='center')\n draw_main_plot(main_mode)\n draw_pie()\n draw_list_chats(id)\n plt.draw()\n", "step-4": "import math\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef draw_chat(\n id, smooth_id, main_mode, \n my_name, chat_day_data, \n main_plot, pie_plot, list_chats_plot):\n\n min_in_day = 1440\n possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 15, 20, 30, 40, 45, 60] #divisors of 1440 (minutes in day)\n \n\n count_of_chats = len(chat_day_data)\n id = (id + count_of_chats) % count_of_chats\n smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)\n \n smooth = possible_smooth[smooth_id]\n sum_score = chat_day_data[id][2]\n calendar = chat_day_data[id][3]\n companion_name = chat_day_data[id][0]\n \n def draw_main_plot_as_all():\n first_day = 0\n def gen_data():\n nonlocal first_day\n \n calendar_dates = list(calendar.keys())\n ind = [0]\n now = min(calendar_dates)\n first_day = now\n last = max(calendar_dates)\n duration = (last - now).days + 1\n need_space_btw_labels = duration // 25\n labels = [now]\n last_label = 0\n t = 0\n vals = [0] * duration\n vals[0] = calendar[now]\n \n while now != last:\n now += datetime.timedelta(days=1)\n t += 1\n if now in calendar_dates:\n ind.append(t)\n vals[t] = calendar[now]\n if t-last_label >= need_space_btw_labels:\n last_label = t\n labels.append(str(now))\n else:\n labels.append(\"\")\n \n def make_smoothie(a, shift):\n n = len(a)\n res = [0] * n\n \n koef = []\n for i in range(shift+1):\n koef.append( max(0, math.cos(i/(shift+1))**2*2 - 1) )\n \n for i in range(n):\n sum = 0\n sum_k = 0\n for j in range(-shift, shift+1):\n if 0 <= i+j < n:\n k = koef[abs(j)]\n sum += a[i+j] * k\n sum_k += k\n res[i] = sum / sum_k\n return res\n\n s = int((duration/50)**0.5) #random.randint(0,10)\n print(duration, s)\n vals = make_smoothie(vals, s)\n\n return ind,labels,vals\n\n width = 1 # default value\n plot = main_plot\n \n plot.clear()\n ind, labels, vals = gen_data()\n plot.set_xticks(ind)\n plot.set_xticklabels(labels)\n plot.xaxis.set_tick_params(rotation=90)\n #plot.bar(ind, vals, width)\n plot.bar(range(len(vals)), vals, width)\n \n def format_coord(x, y):\n day = int(x + 0.5)\n day = first_day + datetime.timedelta(days=day)\n #print(day,y)\n val = 0\n if day in calendar:\n val = calendar[day]\n if val > 512:\n val = str(val // 1024) + \".\" + str(int((val % 1024 / 102.4 + 0.5)))\n val += \"Kb\"\n return str(day) + \" \" + str(val)\n return str(day)\n\n plot.format_coord = format_coord\n #plot.set_yscale('log')\n\n\n 
def draw_main_plot_as_day():\n N = min_in_day // smooth\n \n def set_smooth(score, smooth):\n res = [0] * N\n for i in range(min_in_day):\n res[i//smooth] += score[i]\n #res[i] = sum(score[i*smooth:(i+1)*smooth])\n return res\n\n me_score = set_smooth(sum_score[0], smooth)\n he_score = set_smooth(sum_score[1], smooth)\n\n ind = np.arange(N)\n width = 1 \n def gen_time_labels():\n # Set step between labels for they count of be near the 24\n k = int(N / 24 + 0.5) \n\n def time(t):\n # get time in format `h:mm` from `t` as minute\n return str(t//60) + \":\" + str(t//10%6)+str(t%10)\n labels = [time(x*smooth) if x % k == 0 else \"\" \n for x in range(N)]\n return labels \n\n width = 0.8 # default value\n plot = main_plot\n \n plot.clear()\n plot.set_xticks(ind)\n plot.set_xticklabels(gen_time_labels())\n plot.xaxis.set_tick_params(rotation=90)\n p1 = plot.bar(ind, me_score, width)\n p2 = plot.bar(ind, he_score, width, bottom=me_score)\n plot.legend((p1[0], p2[0]), (my_name, companion_name))\n\n def format_coord(x,y):\n x = int(x+0.5)\n if 0 <= x < len(me_score) and me_score[x] + he_score[x]:\n rate = me_score[x] / (me_score[x] + he_score[x])\n return f\"rate: {rate*100:.2f}%\"\n \n return None\n\n plot.format_coord = format_coord\n\n def draw_main_plot(mode):\n if mode == 0:\n draw_main_plot_as_day()\n else:\n draw_main_plot_as_all()\n\n\n def draw_pie():\n sizes = chat_day_data[id][1]\n explode = [0, 0, 0.1] \n pie_plot.clear()\n\n def get_angle():\n # Set green part (forwarded message) in central bottom part\n return -90 + 360*(sizes[2]/(2*sum(sizes)))\n\n pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%',\n shadow=True, startangle=get_angle())\n pie_plot.format_coord = lambda x,y: None\n \n def draw_list_chats(id):\n chats_above = 4\n chats_bottom = 5\n\n if count_of_chats < chats_above + 1 + chats_bottom:\n chats_above = id\n chats_bottom = count_of_chats - id - 1\n\n if id < chats_above:\n chats_bottom += chats_above - id\n chats_above = id\n if id + chats_bottom >= count_of_chats:\n chats_bottom = count_of_chats - id - 1\n\n plot = list_chats_plot\n N = chats_above + 1 + chats_bottom\n people = []\n scores = []\n for i in range(-chats_above, chats_bottom+1):\n people.append(chat_day_data[i+id][0])\n scores.append(sum(chat_day_data[i+id][1]))\n\n selected_chat = [0] * N\n selected_chat[chats_above] = scores[chats_above]\n\n plot.clear()\n plot.set_yticks(range(N))\n plot.set_yticklabels(people)\n plot.invert_yaxis() \n plot.yaxis.tick_right()\n plot.invert_xaxis()\n plot.axes.get_xaxis().set_visible(False)\n #plot.axes.get_yaxis().set_ticks([])\n\n bars = plot.barh(range(N), scores)\n plot.barh(range(N), selected_chat)\n plot.format_coord = lambda x,y: None\n\n for bar in bars:\n continue\n height = bar.get_y() + bar.get_height() / 2\n width = bar.get_x() + bar.get_width()\n plot.annotate(f' {str(width)[:]}',\n xy=(width, height),\n ha='left', va='center')\n\n\n draw_main_plot(main_mode)\n draw_pie()\n draw_list_chats(id)\n plt.draw()\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
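The interesting kernel in the record above is make_smoothie: a normalized sliding window whose weights follow max(0, 2*cos(i/(shift+1))**2 - 1), so influence fades with distance from the center and the sum is renormalized at the edges. Extracted as a standalone sketch:

import math

def make_smoothie(a, shift):
    n = len(a)
    res = [0] * n
    # one-sided weights; koef[0] == 1 at the window center
    koef = [max(0.0, math.cos(i / (shift + 1)) ** 2 * 2 - 1) for i in range(shift + 1)]
    for i in range(n):
        total, total_k = 0.0, 0.0
        for j in range(-shift, shift + 1):
            if 0 <= i + j < n:
                k = koef[abs(j)]
                total += a[i + j] * k
                total_k += k
        res[i] = total / total_k  # koef[0] == 1 guarantees total_k > 0
    return res

print(make_smoothie([0, 0, 10, 0, 0], shift=1))  # the spike bleeds into its neighbours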
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> with open('./roc.txt', 'r') as fin: with open('./roc_shuffle.txt', 'w') as fout: tmp = [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: idx = [0] + np.random.permutation(range(1, 5)).tolist() for sen in np.take(tmp, idx).tolist(): fout.write(sen + '\n') tmp = [] fout.write(line.strip() + '\n') else: tmp.append(line.strip()) with open('./roc.txt', 'r') as fin: with open('./roc_repeat.txt', 'w') as fout: tmp = [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: idx = random.randint(1, 4) tmp[idx] = tmp[idx][:-1] + tmp[idx] for sen in tmp: fout.write(sen + '\n') tmp = [] fout.write(line.strip() + '\n') else: tmp.append(line.strip()) with open('./roc.txt', 'r') as fin: with open('./roc_replace.txt', 'w') as fout: post, tmp = [], [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: post.append(tmp) tmp = [] else: tmp.append(line.strip().split()) data = {'1': [], '2': [], '3': [], '4': [], '5': []} for p in post: for i in range(5): data['%d' % (i + 1)].append(p[i]) random_data = data.copy() for i in range(5): random_data['%d' % (i + 1)] = np.random.permutation(random_data ['%d' % (i + 1)]) for k in range(len(post)): idx = np.random.permutation(range(1, 5))[0] for i in range(5): if i == idx: fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\n') else: fout.write(' '.join(data['%d' % (i + 1)][k]) + '\n') fout.write('------\n') <|reserved_special_token_1|> import numpy as np import random with open('./roc.txt', 'r') as fin: with open('./roc_shuffle.txt', 'w') as fout: tmp = [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: idx = [0] + np.random.permutation(range(1, 5)).tolist() for sen in np.take(tmp, idx).tolist(): fout.write(sen + '\n') tmp = [] fout.write(line.strip() + '\n') else: tmp.append(line.strip()) with open('./roc.txt', 'r') as fin: with open('./roc_repeat.txt', 'w') as fout: tmp = [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: idx = random.randint(1, 4) tmp[idx] = tmp[idx][:-1] + tmp[idx] for sen in tmp: fout.write(sen + '\n') tmp = [] fout.write(line.strip() + '\n') else: tmp.append(line.strip()) with open('./roc.txt', 'r') as fin: with open('./roc_replace.txt', 'w') as fout: post, tmp = [], [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: post.append(tmp) tmp = [] else: tmp.append(line.strip().split()) data = {'1': [], '2': [], '3': [], '4': [], '5': []} for p in post: for i in range(5): data['%d' % (i + 1)].append(p[i]) random_data = data.copy() for i in range(5): random_data['%d' % (i + 1)] = np.random.permutation(random_data ['%d' % (i + 1)]) for k in range(len(post)): idx = np.random.permutation(range(1, 5))[0] for i in range(5): if i == idx: fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\n') else: fout.write(' '.join(data['%d' % (i + 1)][k]) + '\n') fout.write('------\n') <|reserved_special_token_1|> import numpy as np import random with open("./roc.txt", "r") as fin: with open("./roc_shuffle.txt", "w") as fout: tmp = [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: idx = [0] + np.random.permutation(range(1,5)).tolist() for sen in np.take(tmp, idx).tolist(): fout.write(sen+"\n") tmp = [] fout.write(line.strip()+"\n") else: tmp.append(line.strip()) with open("./roc.txt", "r") as fin: with open("./roc_repeat.txt", "w") as fout: tmp = [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: idx = random.randint(1,4) tmp[idx] = tmp[idx][:-1] + tmp[idx] for sen in tmp: fout.write(sen+"\n") tmp = [] 
fout.write(line.strip()+"\n") else: tmp.append(line.strip()) with open("./roc.txt", "r") as fin: with open("./roc_replace.txt", "w") as fout: post, tmp = [], [] for k, line in enumerate(fin): i = k + 1 if i % 6 == 0: post.append(tmp) tmp = [] else: tmp.append(line.strip().split()) data = {"1":[], "2":[], "3":[], "4":[], "5":[]} for p in post: for i in range(5): data["%d"%(i+1)].append(p[i]) random_data = data.copy() for i in range(5): random_data["%d"%(i+1)] = np.random.permutation(random_data["%d"%(i+1)]) for k in range(len(post)): idx = np.random.permutation(range(1,5))[0] for i in range(5): if i == idx: fout.write(' '.join(random_data["%d"%(i+1)][k])+"\n") else: fout.write(' '.join(data["%d"%(i+1)][k])+"\n") fout.write("------\n")
flexible
{ "blob_id": "2aec0581413d4fb0ffb4090231fde0fed974bf18", "index": 27, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_shuffle.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = [0] + np.random.permutation(range(1, 5)).tolist()\n for sen in np.take(tmp, idx).tolist():\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_repeat.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = random.randint(1, 4)\n tmp[idx] = tmp[idx][:-1] + tmp[idx]\n for sen in tmp:\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_replace.txt', 'w') as fout:\n post, tmp = [], []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n post.append(tmp)\n tmp = []\n else:\n tmp.append(line.strip().split())\n data = {'1': [], '2': [], '3': [], '4': [], '5': []}\n for p in post:\n for i in range(5):\n data['%d' % (i + 1)].append(p[i])\n random_data = data.copy()\n for i in range(5):\n random_data['%d' % (i + 1)] = np.random.permutation(random_data\n ['%d' % (i + 1)])\n for k in range(len(post)):\n idx = np.random.permutation(range(1, 5))[0]\n for i in range(5):\n if i == idx:\n fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\\n')\n else:\n fout.write(' '.join(data['%d' % (i + 1)][k]) + '\\n')\n fout.write('------\\n')\n", "step-3": "import numpy as np\nimport random\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_shuffle.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = [0] + np.random.permutation(range(1, 5)).tolist()\n for sen in np.take(tmp, idx).tolist():\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_repeat.txt', 'w') as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = random.randint(1, 4)\n tmp[idx] = tmp[idx][:-1] + tmp[idx]\n for sen in tmp:\n fout.write(sen + '\\n')\n tmp = []\n fout.write(line.strip() + '\\n')\n else:\n tmp.append(line.strip())\nwith open('./roc.txt', 'r') as fin:\n with open('./roc_replace.txt', 'w') as fout:\n post, tmp = [], []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n post.append(tmp)\n tmp = []\n else:\n tmp.append(line.strip().split())\n data = {'1': [], '2': [], '3': [], '4': [], '5': []}\n for p in post:\n for i in range(5):\n data['%d' % (i + 1)].append(p[i])\n random_data = data.copy()\n for i in range(5):\n random_data['%d' % (i + 1)] = np.random.permutation(random_data\n ['%d' % (i + 1)])\n for k in range(len(post)):\n idx = np.random.permutation(range(1, 5))[0]\n for i in range(5):\n if i == idx:\n fout.write(' '.join(random_data['%d' % (i + 1)][k]) + '\\n')\n else:\n fout.write(' '.join(data['%d' % (i + 1)][k]) + '\\n')\n fout.write('------\\n')\n", "step-4": "import numpy as np\nimport random\n\nwith open(\"./roc.txt\", \"r\") as fin:\n with open(\"./roc_shuffle.txt\", \"w\") as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = [0] + np.random.permutation(range(1,5)).tolist()\n for sen in np.take(tmp, idx).tolist():\n fout.write(sen+\"\\n\")\n tmp = []\n fout.write(line.strip()+\"\\n\")\n else:\n 
tmp.append(line.strip())\nwith open(\"./roc.txt\", \"r\") as fin:\n with open(\"./roc_repeat.txt\", \"w\") as fout:\n tmp = []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n idx = random.randint(1,4)\n tmp[idx] = tmp[idx][:-1] + tmp[idx]\n for sen in tmp:\n fout.write(sen+\"\\n\")\n tmp = []\n fout.write(line.strip()+\"\\n\")\n else:\n tmp.append(line.strip())\nwith open(\"./roc.txt\", \"r\") as fin:\n with open(\"./roc_replace.txt\", \"w\") as fout:\n post, tmp = [], []\n for k, line in enumerate(fin):\n i = k + 1\n if i % 6 == 0:\n post.append(tmp)\n tmp = []\n else:\n tmp.append(line.strip().split())\n data = {\"1\":[], \"2\":[], \"3\":[], \"4\":[], \"5\":[]}\n for p in post:\n for i in range(5):\n data[\"%d\"%(i+1)].append(p[i])\n random_data = data.copy()\n for i in range(5):\n random_data[\"%d\"%(i+1)] = np.random.permutation(random_data[\"%d\"%(i+1)])\n\n for k in range(len(post)):\n idx = np.random.permutation(range(1,5))[0]\n for i in range(5):\n if i == idx:\n fout.write(' '.join(random_data[\"%d\"%(i+1)][k])+\"\\n\")\n else:\n fout.write(' '.join(data[\"%d\"%(i+1)][k])+\"\\n\")\n fout.write(\"------\\n\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
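The three loops in the record above build corrupted copies of a five-sentence story corpus: shuffle the last four sentences, repeat one random sentence in place, or swap one sentence with the same position of another story. A minimal sketch of the shuffle variant on an in-memory story:

import numpy as np

story = ['s1', 's2', 's3', 's4', 's5']
idx = [0] + np.random.permutation(range(1, 5)).tolist()  # the first sentence stays put
shuffled = np.take(story, idx).tolist()
print(shuffled)  # e.g. ['s1', 's4', 's2', 's5', 's3']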
import os error_msg = '''The default transformer cannot handle slashes (subdirectories); try another transformer in vlermv.transformers.''' def to_path(key): if isinstance(key, tuple): if len(key) == 1: key = key[0] else: raise ValueError(error_msg) if '/' in key or '\\' in key or os.path.sep in key: raise ValueError(error_msg) return (key,) def from_path(path): if len(path) != 1: raise ValueError(error_msg) return path[0]
normal
{ "blob_id": "e4ff6d689a7da5b16786fd59d6a4707b9b6e3e7d", "index": 8076, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n", "step-3": "<mask token>\nerror_msg = \"\"\"The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.\"\"\"\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n", "step-4": "import os\nerror_msg = \"\"\"The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.\"\"\"\n\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n return key,\n\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n", "step-5": "import os\n\nerror_msg = '''The default transformer cannot handle slashes (subdirectories);\ntry another transformer in vlermv.transformers.'''\n\ndef to_path(key):\n if isinstance(key, tuple):\n if len(key) == 1:\n key = key[0]\n else:\n raise ValueError(error_msg)\n\n if '/' in key or '\\\\' in key or os.path.sep in key:\n raise ValueError(error_msg)\n\n return (key,)\n\ndef from_path(path):\n if len(path) != 1:\n raise ValueError(error_msg)\n return path[0]\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
from selenium import webdriver
import time


def test_check_error_page_1():
    try:
        link = "http://suninjuly.github.io/registration1.html"
        browser = webdriver.Chrome()
        browser.get(link)

        # Check the First name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/input')
        first_input.send_keys('Иван')

        # assert field_text == 'First name*'

        # Check the Last name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/input')
        first_input.send_keys('Петров')

        # assert field_text == 'Last name*'

        # Check the Email* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/input')
        first_input.send_keys('email@email.test')

        # assert field_text == 'Email*'

        # Submit the completed form
        button = browser.find_element_by_css_selector("button.btn")
        button.click()

        # Verify that registration succeeded
        # wait for the page to load
        time.sleep(1)

        # find the element containing the text
        welcome_text_elt = browser.find_element_by_tag_name("h1")
        # store the element's text in welcome_text
        welcome_text = welcome_text_elt.text

        # assert that the expected text matches the text on the page
        assert "Congratulations! You have successfully registered!" == welcome_text

    finally:
        # pause so the result of the script can be inspected visually
        time.sleep(10)
        # close the browser after all actions
        browser.quit()


def test_check_error_page_2():
    try:
        link = "http://suninjuly.github.io/registration2.html"
        browser = webdriver.Chrome()
        browser.get(link)

        # Check the First name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group first_class"]/input')
        first_input.send_keys('Иван')

        # assert field_text == 'First name*'

        # Check the Last name* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group second_class"]/input')
        first_input.send_keys('Петров')

        # assert field_text == 'Last name*'

        # Check the Email* field
        field_text = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/label').text
        first_input = browser.find_element_by_xpath(
            '//body/div/form/div[@class="first_block"]/div[@class="form-group third_class"]/input')
        first_input.send_keys('email@email.test')

        # assert field_text == 'Email*'

        # Submit the completed form
        button = browser.find_element_by_css_selector("button.btn")
        button.click()

        # Verify that registration succeeded
        # wait for the page to load
        time.sleep(1)

        # find the element containing the text
        welcome_text_elt = browser.find_element_by_tag_name("h1")
        # store the element's text in welcome_text
        welcome_text = welcome_text_elt.text

        # assert that the expected text matches the text on the page
        assert "Congratulations! You have successfully registered!" == welcome_text

    finally:
        # pause so the result of the script can be inspected visually
        time.sleep(10)
        # close the browser after all actions
        browser.quit()
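The two tests above rely on fixed time.sleep() calls; a hedged alternative sketch using Selenium's explicit-wait API (standard Selenium support classes, not part of the original tests) makes the success check less timing-dependent:

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def wait_for_welcome_text(browser, timeout=10):
    # Block until the <h1> heading appears instead of sleeping a fixed second.
    heading = WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.TAG_NAME, "h1")))
    return heading.text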
normal
{ "blob_id": "83ebebbb6191295adcb58b003bf1c3bcc6fb189f", "index": 7405, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_check_error_page_1():\n try:\n link = 'http://suninjuly.github.io/registration1.html'\n browser = webdriver.Chrome()\n browser.get(link)\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input'\n )\n first_input.send_keys('Иван')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input'\n )\n first_input.send_keys('Петров')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input'\n )\n first_input.send_keys('email@email.test')\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\n time.sleep(1)\n welcome_text_elt = browser.find_element_by_tag_name('h1')\n welcome_text = welcome_text_elt.text\n assert 'Congratulations! You have successfully registered!' == welcome_text\n finally:\n time.sleep(10)\n browser.quit()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_check_error_page_1():\n try:\n link = 'http://suninjuly.github.io/registration1.html'\n browser = webdriver.Chrome()\n browser.get(link)\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input'\n )\n first_input.send_keys('Иван')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input'\n )\n first_input.send_keys('Петров')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input'\n )\n first_input.send_keys('email@email.test')\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\n time.sleep(1)\n welcome_text_elt = browser.find_element_by_tag_name('h1')\n welcome_text = welcome_text_elt.text\n assert 'Congratulations! You have successfully registered!' 
== welcome_text\n finally:\n time.sleep(10)\n browser.quit()\n\n\ndef test_check_error_page_2():\n try:\n link = 'http://suninjuly.github.io/registration2.html'\n browser = webdriver.Chrome()\n browser.get(link)\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input'\n )\n first_input.send_keys('Иван')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input'\n )\n first_input.send_keys('Петров')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input'\n )\n first_input.send_keys('email@email.test')\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\n time.sleep(1)\n welcome_text_elt = browser.find_element_by_tag_name('h1')\n welcome_text = welcome_text_elt.text\n assert 'Congratulations! You have successfully registered!' == welcome_text\n finally:\n time.sleep(10)\n browser.quit()\n", "step-4": "from selenium import webdriver\nimport time\n\n\ndef test_check_error_page_1():\n try:\n link = 'http://suninjuly.github.io/registration1.html'\n browser = webdriver.Chrome()\n browser.get(link)\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input'\n )\n first_input.send_keys('Иван')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input'\n )\n first_input.send_keys('Петров')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input'\n )\n first_input.send_keys('email@email.test')\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\n time.sleep(1)\n welcome_text_elt = browser.find_element_by_tag_name('h1')\n welcome_text = welcome_text_elt.text\n assert 'Congratulations! You have successfully registered!' 
== welcome_text\n finally:\n time.sleep(10)\n browser.quit()\n\n\ndef test_check_error_page_2():\n try:\n link = 'http://suninjuly.github.io/registration2.html'\n browser = webdriver.Chrome()\n browser.get(link)\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input'\n )\n first_input.send_keys('Иван')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input'\n )\n first_input.send_keys('Петров')\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label'\n ).text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input'\n )\n first_input.send_keys('email@email.test')\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\n time.sleep(1)\n welcome_text_elt = browser.find_element_by_tag_name('h1')\n welcome_text = welcome_text_elt.text\n assert 'Congratulations! You have successfully registered!' == welcome_text\n finally:\n time.sleep(10)\n browser.quit()\n", "step-5": "from selenium import webdriver\nimport time\n\ndef test_check_error_page_1():\n try:\n link = \"http://suninjuly.github.io/registration1.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n\n # Проверяем Fisrt name*\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label').text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input')\n first_input.send_keys('Иван')\n\n # assert field_text=='First name*'\n\n # Проверяем Last name*\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label').text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input')\n first_input.send_keys('Петров')\n\n # assert field_text == 'Last name*'\n\n # Проверяем Email*\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label').text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input')\n first_input.send_keys('email@email.test')\n\n # assert field_text == 'Email*'\n\n # Отправляем заполненную форму\n button = browser.find_element_by_css_selector(\"button.btn\")\n button.click()\n\n # Проверяем, что смогли зарегистрироваться\n # ждем загрузки страницы\n time.sleep(1)\n\n # находим элемент, содержащий текст\n welcome_text_elt = browser.find_element_by_tag_name(\"h1\")\n # записываем в переменную welcome_text текст из элемента welcome_text_elt\n welcome_text = welcome_text_elt.text\n\n # с помощью assert проверяем, что ожидаемый текст совпадает с текстом на странице сайта\n assert \"Congratulations! 
You have successfully registered!\" == welcome_text\n\n finally:\n # ожидание чтобы визуально оценить результаты прохождения скрипта\n time.sleep(10)\n # закрываем браузер после всех манипуляций\n browser.quit()\n\ndef test_check_error_page_2():\n try:\n link = \"http://suninjuly.github.io/registration2.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n\n # Проверяем Fisrt name*\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/label').text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group first_class\"]/input')\n first_input.send_keys('Иван')\n\n # assert field_text=='First name*'\n\n # Проверяем Last name*\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/label').text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group second_class\"]/input')\n first_input.send_keys('Петров')\n\n # assert field_text == 'Last name*'\n\n # Проверяем Email*\n field_text = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/label').text\n first_input = browser.find_element_by_xpath(\n '//body/div/form/div[@class=\"first_block\"]/div[@class=\"form-group third_class\"]/input')\n first_input.send_keys('email@email.test')\n\n # assert field_text == 'Email*'\n\n # Отправляем заполненную форму\n button = browser.find_element_by_css_selector(\"button.btn\")\n button.click()\n\n # Проверяем, что смогли зарегистрироваться\n # ждем загрузки страницы\n time.sleep(1)\n\n # находим элемент, содержащий текст\n welcome_text_elt = browser.find_element_by_tag_name(\"h1\")\n # записываем в переменную welcome_text текст из элемента welcome_text_elt\n welcome_text = welcome_text_elt.text\n\n # с помощью assert проверяем, что ожидаемый текст совпадает с текстом на странице сайта\n assert \"Congratulations! You have successfully registered!\" == welcome_text\n\n finally:\n # ожидание чтобы визуально оценить результаты прохождения скрипта\n time.sleep(10)\n # закрываем браузер после всех манипуляций\n browser.quit()", "step-ids": [ 0, 1, 2, 3, 4 ] }
import random

import Manhattan_segmental_dist


# Greedy
# s: dictionary of points
# k: number of medoids
# returns k medoids from sample set s
def greedy(s, k):
    m_1 = random.choice(list(s.keys()))
    medoids = {m_1: s[m_1]}
    dimensions = list(range(len(s[m_1])))
    s.pop(m_1)
    dist = {}
    # compute distance between each remaining point and medoid m_1
    for x in s:
        dist[x] = Manhattan_segmental_dist.manhattan_segmental_dist(medoids[m_1], s[x], dimensions)
    for i in range(1, k):
        # the next medoid is the point farthest from all medoids chosen so far
        m_i = max(dist, key=lambda x: dist.get(x))
        medoids[m_i] = s[m_i]
        dist.pop(m_i)
        s.pop(m_i)
        # keep dist[x] as the distance to the nearest chosen medoid
        for x in s:
            dist[x] = min(dist[x], Manhattan_segmental_dist.manhattan_segmental_dist(medoids[m_i], s[x], dimensions))
    return medoids
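A small usage sketch, assuming Manhattan_segmental_dist is importable and that points are equal-length coordinate tuples keyed by id (the values below are made up):

points = {
    'a': (0.0, 0.0),
    'b': (0.1, 0.2),
    'c': (5.0, 5.1),
    'd': (5.2, 4.9),
}
# greedy() pops entries from its argument, so pass a copy to keep `points` intact.
medoids = greedy(dict(points), k=2)
print(medoids)  # two well-separated points, typically one from each cluster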
flexible
{ "blob_id": "9a02bd0bc14494db033c032003aa5baea111ea8c", "index": 7185, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef greedy(s, k):\n m_1 = random.choice(list(s.keys()))\n medoids = {m_1: s[m_1]}\n dimensions = list(range(len(s[m_1])))\n s.pop(m_1)\n dist = {}\n for x in s:\n dist[x] = Manhattan_segmental_dist.manhattan_segmental_dist(medoids\n [m_1], s[x], dimensions)\n for i in range(1, k):\n m_i = max(dist, key=lambda x: dist.get(x))\n medoids[m_i] = s[m_i]\n dist.pop(m_i)\n s.pop(m_i)\n for x in s:\n dist[x] = min(dist[x], Manhattan_segmental_dist.\n manhattan_segmental_dist(medoids[m_i], s[x], dimensions))\n return medoids\n", "step-3": "import random\nimport Manhattan_segmental_dist\n\n\ndef greedy(s, k):\n m_1 = random.choice(list(s.keys()))\n medoids = {m_1: s[m_1]}\n dimensions = list(range(len(s[m_1])))\n s.pop(m_1)\n dist = {}\n for x in s:\n dist[x] = Manhattan_segmental_dist.manhattan_segmental_dist(medoids\n [m_1], s[x], dimensions)\n for i in range(1, k):\n m_i = max(dist, key=lambda x: dist.get(x))\n medoids[m_i] = s[m_i]\n dist.pop(m_i)\n s.pop(m_i)\n for x in s:\n dist[x] = min(dist[x], Manhattan_segmental_dist.\n manhattan_segmental_dist(medoids[m_i], s[x], dimensions))\n return medoids\n", "step-4": "import random\nimport Manhattan_segmental_dist\n\n\n# Greedy\n# s: dictionary of points\n# k: number of medoids\n# returns\n# k medoids from sample set s\ndef greedy(s, k):\n # print(\"Hello Word!\")\n m_1 = random.choice(list(s.keys()))\n medoids = {m_1: s[m_1]}\n dimensions = list(range(len(s[m_1])))\n s.pop(m_1)\n dist = {}\n # compute distance between each point and medoid m1\n for x in s:\n dist[x] = Manhattan_segmental_dist.manhattan_segmental_dist(medoids[m_1], s[x], dimensions)\n for i in range(1, k):\n m_i = max(dist, key=lambda x: dist.get(x))\n medoids[m_i] = s[m_i]\n dist.pop(m_i)\n s.pop(m_i)\n for x in s:\n dist[x] = min(dist[x], Manhattan_segmental_dist.manhattan_segmental_dist(medoids[m_i], s[x], dimensions))\n return medoids\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
import tensorflow as tf

import bbox_lib


def hard_negative_loss_mining(c_loss, negative_mask, k):
    """Hard negative mining in classification loss."""
    # make sure at least one negative example
    k = tf.maximum(k, 1)
    # make sure at most all negative.
    k = tf.minimum(k, c_loss.shape[-1])
    neg_c_loss = c_loss * negative_mask
    neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]
    return tf.reduce_sum(neg_c_loss)


def compute_loss(network_output, bboxes, labels, num_classes, c_weight, r_weight,
                 neg_label_value, ignore_label_value, negative_ratio):
    """Compute loss function."""

    with tf.variable_scope("losses"):
        batch_size = bboxes.shape[0].value
        one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)
        negative_mask = tf.cast(tf.equal(labels, neg_label_value), tf.float32)
        positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels, ignore_label_value),
                                               tf.not_equal(labels, neg_label_value)), tf.float32)

        with tf.variable_scope("classification_loss"):
            classification_output = network_output[0]
            classification_output = tf.reshape(
                classification_output, [batch_size, -1, num_classes + 1])

            c_loss = tf.losses.softmax_cross_entropy(
                one_hot_labels, classification_output, reduction=tf.losses.Reduction.NONE)

            num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)
            pos_c_loss = tf.reduce_sum(c_loss * positive_mask)
            neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask,
                                                   num_positive * negative_ratio)

            c_loss = (pos_c_loss + neg_c_loss) / batch_size

        with tf.variable_scope("regression_loss"):
            regression_output = network_output[1]
            regression_output = tf.reshape(
                regression_output, [batch_size, -1, 4])
            r_loss = tf.losses.huber_loss(regression_output, bboxes, delta=1,
                                          reduction=tf.losses.Reduction.NONE)

            r_loss = tf.reduce_sum(
                r_loss * positive_mask[..., tf.newaxis]) / batch_size

    return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss


def predict(network_output, mask, score_threshold, neg_label_value, anchors,
            max_prediction, num_classes):
    """Decode predictions from the neural network."""

    classification_output = network_output[0]
    batch_size, _, _, output_dim = classification_output.get_shape().as_list()
    regression_output = network_output[1]
    bbox_list = []
    label_list = []

    ay, ax, ah, aw = bbox_lib.get_center_coordinates_and_sizes(anchors)
    anchor_center_index = tf.cast(tf.transpose(tf.stack([ay, ax])), tf.int32)
    for single_classification_output, single_regression_output, single_mask in zip(
            classification_output, regression_output, mask):
        # num_classes + 1 due to the negative class.
        single_classification_output = tf.reshape(
            single_classification_output, [-1, num_classes + 1])
        single_classification_output = tf.nn.softmax(
            single_classification_output, -1)

        max_confidence = tf.reduce_max(single_classification_output, -1)
        confident_mask = max_confidence > score_threshold
        # - 1 due to the negative class.
max_index = tf.argmax(single_classification_output, 1) - 1 non_negative_mask = tf.not_equal(max_index, -1) in_mask = tf.gather_nd(single_mask, anchor_center_index) foreground_mask = tf.logical_and( in_mask, tf.logical_and(confident_mask, non_negative_mask)) valid_labels = tf.boolean_mask(max_index, foreground_mask) single_regression_output = tf.reshape(single_regression_output, [-1, 4]) predicted_bbox = bbox_lib.decode_box_with_anchor( single_regression_output, anchors) valid_boxes = tf.boolean_mask(predicted_bbox, foreground_mask) valid_confidence_score = tf.boolean_mask( max_confidence, foreground_mask) selected_indices = tf.image.non_max_suppression( valid_boxes, valid_confidence_score, max_prediction) valid_boxes = tf.gather(valid_boxes, selected_indices) valid_labels = tf.gather(valid_labels, selected_indices) bbox_list.append(valid_boxes) label_list.append(valid_labels) return bbox_list, label_list def build_model(num_classes, anchor_num_per_output): base_network_model = tf.keras.applications.resnet50.ResNet50( include_top=False, weights="imagenet") for layer in base_network_model.layers: layer.trainable = False h = base_network_model.get_layer(name='activation_39').output drop_rate = 0.5 h = tf.keras.layers.Dropout(drop_rate)(h) classification_branch = tf.keras.layers.Conv2D( (num_classes + 1) * anchor_num_per_output, (1, 1))(h) regression_branch = tf.keras.layers.Conv2D( 4 * anchor_num_per_output, (1, 1))(h) model_outputs = [classification_branch, regression_branch] return tf.keras.models.Model(base_network_model.input, model_outputs)
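A toy check of hard_negative_loss_mining above, assuming TensorFlow 1.x graph mode (consistent with the tf.variable_scope and tf.losses calls in this file); the loss values are made up:

# One image, four anchors; 1.0 in the mask marks negative (background) anchors.
c_loss = tf.constant([[0.1, 0.9, 0.4, 0.7]])
negative_mask = tf.constant([[1.0, 1.0, 0.0, 1.0]])
mined = hard_negative_loss_mining(c_loss, negative_mask, k=2)

with tf.Session() as sess:
    # Sums the two largest masked losses: 0.9 + 0.7 = 1.6
    print(sess.run(mined))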
normal
{ "blob_id": "6e17fef4507c72190a77976e4a8b2f56880f2d6f", "index": 4895, "step-1": "<mask token>\n\n\ndef hard_negative_loss_mining(c_loss, negative_mask, k):\n \"\"\"Hard negative mining in classification loss.\"\"\"\n k = tf.maximum(k, 1)\n k = tf.minimum(k, c_loss.shape[-1])\n neg_c_loss = c_loss * negative_mask\n neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]\n return tf.reduce_sum(neg_c_loss)\n\n\ndef compute_loss(network_output, bboxes, labels, num_classes, c_weight,\n r_weight, neg_label_value, ignore_label_value, negative_ratio):\n \"\"\"Compute loss function.\"\"\"\n with tf.variable_scope('losses'):\n batch_size = bboxes.shape[0].value\n one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)\n negative_mask = tf.cast(tf.equal(labels, neg_label_value), tf.float32)\n positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels,\n ignore_label_value), tf.not_equal(labels, neg_label_value)), tf\n .float32)\n with tf.variable_scope('classification_loss'):\n classification_output = network_output[0]\n classification_output = tf.reshape(classification_output, [\n batch_size, -1, num_classes + 1])\n c_loss = tf.losses.softmax_cross_entropy(one_hot_labels,\n classification_output, reduction=tf.losses.Reduction.NONE)\n num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)\n pos_c_loss = tf.reduce_sum(c_loss * positive_mask)\n neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask, \n num_positive * negative_ratio)\n c_loss = (pos_c_loss + neg_c_loss) / batch_size\n with tf.variable_scope('regression_loss'):\n regression_output = network_output[1]\n regression_output = tf.reshape(regression_output, [batch_size, \n -1, 4])\n r_loss = tf.losses.huber_loss(regression_output, bboxes, delta=\n 1, reduction=tf.losses.Reduction.NONE)\n r_loss = tf.reduce_sum(r_loss * positive_mask[..., tf.newaxis]\n ) / batch_size\n return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef hard_negative_loss_mining(c_loss, negative_mask, k):\n \"\"\"Hard negative mining in classification loss.\"\"\"\n k = tf.maximum(k, 1)\n k = tf.minimum(k, c_loss.shape[-1])\n neg_c_loss = c_loss * negative_mask\n neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]\n return tf.reduce_sum(neg_c_loss)\n\n\ndef compute_loss(network_output, bboxes, labels, num_classes, c_weight,\n r_weight, neg_label_value, ignore_label_value, negative_ratio):\n \"\"\"Compute loss function.\"\"\"\n with tf.variable_scope('losses'):\n batch_size = bboxes.shape[0].value\n one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)\n negative_mask = tf.cast(tf.equal(labels, neg_label_value), tf.float32)\n positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels,\n ignore_label_value), tf.not_equal(labels, neg_label_value)), tf\n .float32)\n with tf.variable_scope('classification_loss'):\n classification_output = network_output[0]\n classification_output = tf.reshape(classification_output, [\n batch_size, -1, num_classes + 1])\n c_loss = tf.losses.softmax_cross_entropy(one_hot_labels,\n classification_output, reduction=tf.losses.Reduction.NONE)\n num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)\n pos_c_loss = tf.reduce_sum(c_loss * positive_mask)\n neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask, \n num_positive * negative_ratio)\n c_loss = (pos_c_loss + neg_c_loss) / batch_size\n with tf.variable_scope('regression_loss'):\n regression_output = network_output[1]\n regression_output = tf.reshape(regression_output, [batch_size, \n -1, 4])\n r_loss = 
tf.losses.huber_loss(regression_output, bboxes, delta=\n 1, reduction=tf.losses.Reduction.NONE)\n r_loss = tf.reduce_sum(r_loss * positive_mask[..., tf.newaxis]\n ) / batch_size\n return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss\n\n\n<mask token>\n\n\ndef build_model(num_classes, anchor_num_per_output):\n base_network_model = tf.keras.applications.resnet50.ResNet50(include_top\n =False, weights='imagenet')\n for layer in base_network_model.layers:\n layer.trainable = False\n h = base_network_model.get_layer(name='activation_39').output\n drop_rate = 0.5\n h = tf.keras.layers.Dropout(drop_rate)(h)\n classification_branch = tf.keras.layers.Conv2D((num_classes + 1) *\n anchor_num_per_output, (1, 1))(h)\n regression_branch = tf.keras.layers.Conv2D(4 * anchor_num_per_output, (\n 1, 1))(h)\n model_outputs = [classification_branch, regression_branch]\n return tf.keras.models.Model(base_network_model.input, model_outputs)\n", "step-3": "<mask token>\n\n\ndef hard_negative_loss_mining(c_loss, negative_mask, k):\n \"\"\"Hard negative mining in classification loss.\"\"\"\n k = tf.maximum(k, 1)\n k = tf.minimum(k, c_loss.shape[-1])\n neg_c_loss = c_loss * negative_mask\n neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]\n return tf.reduce_sum(neg_c_loss)\n\n\ndef compute_loss(network_output, bboxes, labels, num_classes, c_weight,\n r_weight, neg_label_value, ignore_label_value, negative_ratio):\n \"\"\"Compute loss function.\"\"\"\n with tf.variable_scope('losses'):\n batch_size = bboxes.shape[0].value\n one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)\n negative_mask = tf.cast(tf.equal(labels, neg_label_value), tf.float32)\n positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels,\n ignore_label_value), tf.not_equal(labels, neg_label_value)), tf\n .float32)\n with tf.variable_scope('classification_loss'):\n classification_output = network_output[0]\n classification_output = tf.reshape(classification_output, [\n batch_size, -1, num_classes + 1])\n c_loss = tf.losses.softmax_cross_entropy(one_hot_labels,\n classification_output, reduction=tf.losses.Reduction.NONE)\n num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)\n pos_c_loss = tf.reduce_sum(c_loss * positive_mask)\n neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask, \n num_positive * negative_ratio)\n c_loss = (pos_c_loss + neg_c_loss) / batch_size\n with tf.variable_scope('regression_loss'):\n regression_output = network_output[1]\n regression_output = tf.reshape(regression_output, [batch_size, \n -1, 4])\n r_loss = tf.losses.huber_loss(regression_output, bboxes, delta=\n 1, reduction=tf.losses.Reduction.NONE)\n r_loss = tf.reduce_sum(r_loss * positive_mask[..., tf.newaxis]\n ) / batch_size\n return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss\n\n\ndef predict(network_output, mask, score_threshold, neg_label_value, anchors,\n max_prediction, num_classes):\n \"\"\"Decode predictions from the neural network.\"\"\"\n classification_output = network_output[0]\n batch_size, _, _, output_dim = classification_output.get_shape().as_list()\n regression_output = network_output[1]\n bbox_list = []\n label_list = []\n ay, ax, ah, aw = bbox_lib.get_center_coordinates_and_sizes(anchors)\n anchor_center_index = tf.cast(tf.transpose(tf.stack([ay, ax])), tf.int32)\n for single_classification_output, single_regression_output, single_mask in zip(\n classification_output, regression_output, mask):\n single_classification_output = tf.reshape(single_classification_output,\n [-1, num_classes + 1])\n 
single_classification_output = tf.nn.softmax(\n single_classification_output, -1)\n max_confidence = tf.reduce_max(single_classification_output, -1)\n confident_mask = max_confidence > score_threshold\n max_index = tf.argmax(single_classification_output, 1) - 1\n non_negative_mask = tf.not_equal(max_index, -1)\n in_mask = tf.gather_nd(single_mask, anchor_center_index)\n foreground_mask = tf.logical_and(in_mask, tf.logical_and(\n confident_mask, non_negative_mask))\n valid_labels = tf.boolean_mask(max_index, foreground_mask)\n single_regression_output = tf.reshape(single_regression_output, [-1, 4]\n )\n predicted_bbox = bbox_lib.decode_box_with_anchor(\n single_regression_output, anchors)\n valid_boxes = tf.boolean_mask(predicted_bbox, foreground_mask)\n valid_confidence_score = tf.boolean_mask(max_confidence,\n foreground_mask)\n selected_indices = tf.image.non_max_suppression(valid_boxes,\n valid_confidence_score, max_prediction)\n valid_boxes = tf.gather(valid_boxes, selected_indices)\n valid_labels = tf.gather(valid_labels, selected_indices)\n bbox_list.append(valid_boxes)\n label_list.append(valid_labels)\n return bbox_list, label_list\n\n\ndef build_model(num_classes, anchor_num_per_output):\n base_network_model = tf.keras.applications.resnet50.ResNet50(include_top\n =False, weights='imagenet')\n for layer in base_network_model.layers:\n layer.trainable = False\n h = base_network_model.get_layer(name='activation_39').output\n drop_rate = 0.5\n h = tf.keras.layers.Dropout(drop_rate)(h)\n classification_branch = tf.keras.layers.Conv2D((num_classes + 1) *\n anchor_num_per_output, (1, 1))(h)\n regression_branch = tf.keras.layers.Conv2D(4 * anchor_num_per_output, (\n 1, 1))(h)\n model_outputs = [classification_branch, regression_branch]\n return tf.keras.models.Model(base_network_model.input, model_outputs)\n", "step-4": "import tensorflow as tf\nimport bbox_lib\n\n\ndef hard_negative_loss_mining(c_loss, negative_mask, k):\n \"\"\"Hard negative mining in classification loss.\"\"\"\n k = tf.maximum(k, 1)\n k = tf.minimum(k, c_loss.shape[-1])\n neg_c_loss = c_loss * negative_mask\n neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]\n return tf.reduce_sum(neg_c_loss)\n\n\ndef compute_loss(network_output, bboxes, labels, num_classes, c_weight,\n r_weight, neg_label_value, ignore_label_value, negative_ratio):\n \"\"\"Compute loss function.\"\"\"\n with tf.variable_scope('losses'):\n batch_size = bboxes.shape[0].value\n one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)\n negative_mask = tf.cast(tf.equal(labels, neg_label_value), tf.float32)\n positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels,\n ignore_label_value), tf.not_equal(labels, neg_label_value)), tf\n .float32)\n with tf.variable_scope('classification_loss'):\n classification_output = network_output[0]\n classification_output = tf.reshape(classification_output, [\n batch_size, -1, num_classes + 1])\n c_loss = tf.losses.softmax_cross_entropy(one_hot_labels,\n classification_output, reduction=tf.losses.Reduction.NONE)\n num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)\n pos_c_loss = tf.reduce_sum(c_loss * positive_mask)\n neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask, \n num_positive * negative_ratio)\n c_loss = (pos_c_loss + neg_c_loss) / batch_size\n with tf.variable_scope('regression_loss'):\n regression_output = network_output[1]\n regression_output = tf.reshape(regression_output, [batch_size, \n -1, 4])\n r_loss = tf.losses.huber_loss(regression_output, bboxes, delta=\n 1, 
reduction=tf.losses.Reduction.NONE)\n r_loss = tf.reduce_sum(r_loss * positive_mask[..., tf.newaxis]\n ) / batch_size\n return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss\n\n\ndef predict(network_output, mask, score_threshold, neg_label_value, anchors,\n max_prediction, num_classes):\n \"\"\"Decode predictions from the neural network.\"\"\"\n classification_output = network_output[0]\n batch_size, _, _, output_dim = classification_output.get_shape().as_list()\n regression_output = network_output[1]\n bbox_list = []\n label_list = []\n ay, ax, ah, aw = bbox_lib.get_center_coordinates_and_sizes(anchors)\n anchor_center_index = tf.cast(tf.transpose(tf.stack([ay, ax])), tf.int32)\n for single_classification_output, single_regression_output, single_mask in zip(\n classification_output, regression_output, mask):\n single_classification_output = tf.reshape(single_classification_output,\n [-1, num_classes + 1])\n single_classification_output = tf.nn.softmax(\n single_classification_output, -1)\n max_confidence = tf.reduce_max(single_classification_output, -1)\n confident_mask = max_confidence > score_threshold\n max_index = tf.argmax(single_classification_output, 1) - 1\n non_negative_mask = tf.not_equal(max_index, -1)\n in_mask = tf.gather_nd(single_mask, anchor_center_index)\n foreground_mask = tf.logical_and(in_mask, tf.logical_and(\n confident_mask, non_negative_mask))\n valid_labels = tf.boolean_mask(max_index, foreground_mask)\n single_regression_output = tf.reshape(single_regression_output, [-1, 4]\n )\n predicted_bbox = bbox_lib.decode_box_with_anchor(\n single_regression_output, anchors)\n valid_boxes = tf.boolean_mask(predicted_bbox, foreground_mask)\n valid_confidence_score = tf.boolean_mask(max_confidence,\n foreground_mask)\n selected_indices = tf.image.non_max_suppression(valid_boxes,\n valid_confidence_score, max_prediction)\n valid_boxes = tf.gather(valid_boxes, selected_indices)\n valid_labels = tf.gather(valid_labels, selected_indices)\n bbox_list.append(valid_boxes)\n label_list.append(valid_labels)\n return bbox_list, label_list\n\n\ndef build_model(num_classes, anchor_num_per_output):\n base_network_model = tf.keras.applications.resnet50.ResNet50(include_top\n =False, weights='imagenet')\n for layer in base_network_model.layers:\n layer.trainable = False\n h = base_network_model.get_layer(name='activation_39').output\n drop_rate = 0.5\n h = tf.keras.layers.Dropout(drop_rate)(h)\n classification_branch = tf.keras.layers.Conv2D((num_classes + 1) *\n anchor_num_per_output, (1, 1))(h)\n regression_branch = tf.keras.layers.Conv2D(4 * anchor_num_per_output, (\n 1, 1))(h)\n model_outputs = [classification_branch, regression_branch]\n return tf.keras.models.Model(base_network_model.input, model_outputs)\n", "step-5": "import tensorflow as tf\nimport bbox_lib\n\n\ndef hard_negative_loss_mining(c_loss, negative_mask, k):\n \"\"\"Hard negative mining in classification loss.\"\"\"\n # make sure at least one negative example\n k = tf.maximum(k, 1)\n # make sure at most all negative.\n k = tf.minimum(k, c_loss.shape[-1])\n neg_c_loss = c_loss * negative_mask\n neg_c_loss = tf.nn.top_k(neg_c_loss, k)[0]\n return tf.reduce_sum(neg_c_loss)\n\n\ndef compute_loss(network_output, bboxes, labels, num_classes, c_weight, r_weight,\n neg_label_value, ignore_label_value, negative_ratio):\n \"\"\"Compute loss function.\"\"\"\n\n with tf.variable_scope(\"losses\"):\n batch_size = bboxes.shape[0].value\n one_hot_labels = tf.one_hot(labels + 1, num_classes + 1)\n negative_mask = 
tf.cast(tf.equal(labels, neg_label_value), tf.float32)\n positive_mask = tf.cast(tf.logical_and(tf.not_equal(labels, ignore_label_value),\n tf.not_equal(labels, neg_label_value)), tf.float32)\n\n with tf.variable_scope(\"classification_loss\"):\n classification_output = network_output[0]\n classification_output = tf.reshape(\n classification_output, [batch_size, -1, num_classes + 1])\n\n c_loss = tf.losses.softmax_cross_entropy(\n one_hot_labels, classification_output, reduction=tf.losses.Reduction.NONE)\n\n num_positive = tf.cast(tf.reduce_sum(positive_mask), tf.int32)\n pos_c_loss = tf.reduce_sum(c_loss * positive_mask)\n neg_c_loss = hard_negative_loss_mining(c_loss, negative_mask,\n num_positive * negative_ratio)\n\n c_loss = (pos_c_loss + neg_c_loss) / batch_size\n\n with tf.variable_scope(\"regression_loss\"):\n regression_output = network_output[1]\n regression_output = tf.reshape(\n regression_output, [batch_size, -1, 4])\n r_loss = tf.losses.huber_loss(regression_output, bboxes, delta=1,\n reduction=tf.losses.Reduction.NONE)\n\n r_loss = tf.reduce_sum(\n r_loss * positive_mask[..., tf.newaxis]) / batch_size\n\n return c_weight * c_loss + r_weight * r_loss, c_loss, r_loss\n\n\ndef predict(network_output, mask, score_threshold, neg_label_value, anchors,\n max_prediction, num_classes):\n \"\"\"Decode predictions from the neural network.\"\"\"\n\n classification_output = network_output[0]\n batch_size, _, _, output_dim = classification_output.get_shape().as_list()\n regression_output = network_output[1]\n bbox_list = []\n label_list = []\n\n ay, ax, ah, aw = bbox_lib.get_center_coordinates_and_sizes(anchors)\n anchor_center_index = tf.cast(tf.transpose(tf.stack([ay, ax])), tf.int32)\n for single_classification_output, single_regression_output, single_mask in zip(\n classification_output, regression_output, mask):\n # num_classes + 1 due to the negative class.\n single_classification_output = tf.reshape(\n single_classification_output, [-1, num_classes + 1])\n single_classification_output = tf.nn.softmax(\n single_classification_output, -1)\n\n max_confidence = tf.reduce_max(single_classification_output, -1)\n confident_mask = max_confidence > score_threshold\n # - 1 due to the negative class.\n max_index = tf.argmax(single_classification_output, 1) - 1\n non_negative_mask = tf.not_equal(max_index, -1)\n in_mask = tf.gather_nd(single_mask, anchor_center_index)\n foreground_mask = tf.logical_and(\n in_mask, tf.logical_and(confident_mask, non_negative_mask))\n\n valid_labels = tf.boolean_mask(max_index, foreground_mask)\n\n single_regression_output = tf.reshape(single_regression_output, [-1, 4])\n predicted_bbox = bbox_lib.decode_box_with_anchor(\n single_regression_output, anchors)\n valid_boxes = tf.boolean_mask(predicted_bbox, foreground_mask)\n valid_confidence_score = tf.boolean_mask(\n max_confidence, foreground_mask)\n\n selected_indices = tf.image.non_max_suppression(\n valid_boxes, valid_confidence_score, max_prediction)\n\n valid_boxes = tf.gather(valid_boxes, selected_indices)\n valid_labels = tf.gather(valid_labels, selected_indices)\n bbox_list.append(valid_boxes)\n label_list.append(valid_labels)\n\n return bbox_list, label_list\n\n\ndef build_model(num_classes, anchor_num_per_output):\n base_network_model = tf.keras.applications.resnet50.ResNet50(\n include_top=False, weights=\"imagenet\")\n\n for layer in base_network_model.layers:\n layer.trainable = False\n\n h = base_network_model.get_layer(name='activation_39').output\n drop_rate = 0.5\n h = 
tf.keras.layers.Dropout(drop_rate)(h)\n\n classification_branch = tf.keras.layers.Conv2D(\n (num_classes + 1) * anchor_num_per_output, (1, 1))(h)\n regression_branch = tf.keras.layers.Conv2D(\n 4 * anchor_num_per_output, (1, 1))(h)\n model_outputs = [classification_branch, regression_branch]\n return tf.keras.models.Model(base_network_model.input, model_outputs)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license

<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.spiderForCROWDCUBE import SpiderForCROWDCUBE
"""
Tests for scraping CROWDCUBE
"""

class SpiderForCROWDCUBETest(unittest.TestCase):

    # set up
    def setUp(self):
        logging.basicConfig(level=logging.INFO)
        self.spider = SpiderForCROWDCUBE()
        self.spider.initDriver()

    # tear down
    def tearDown(self):
        self.spider.quitDriver()
    """
    # test account registration
    def test_registerAccount(self):
        logging.info("SpiderForCROWDCUBETest.test_registerAccount")
        self.spider.registerAccount()

    # test account login
    def test_loginAccount(self):
        logging.info("SpiderForCROWDCUBETest.test_loginAccount")
        self.spider.loginAccount()

    # test downloading the companies page
    def test_downloadCompaniesPage(self):
        logging.info("SpiderForCROWDCUBETest.test_downloadCompaniesPage")
        self.spider.downloadCompaniesPage()
    """
    # test downloading a company page
    def test_downloadCompanyPage(self):
        logging.info("SpiderForCROWDCUBETest.test_downloadCompanyPage")
        self.spider.downloadCompanyPage()

# start the tests
if __name__ == "__main__":
    unittest.main(exit=False)
flexible
{ "blob_id": "45856b4c5cbf1d3b414ad769135b2d974bc0a22b", "index": 7120, "step-1": "<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n <mask token>\n <mask token>\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n\n def tearDown(self):\n self.spider.quitDriver()\n <mask token>\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n\n def tearDown(self):\n self.spider.quitDriver()\n \"\"\"\n #測試註冊帳號\n def test_registerAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_registerAccount\")\n self.spider.registerAccount()\n \n #測試登入帳號\n def test_loginAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_loginAccount\")\n self.spider.loginAccount()\n \n #測試抓取 companies page\n def test_downloadCompaniesPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompaniesPage\")\n self.spider.downloadCompaniesPage()\n \"\"\"\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\nif __name__ == '__main__':\n unittest.main(exit=False)\n", "step-4": "<mask token>\nimport unittest\nimport logging\nfrom cameo.spiderForCROWDCUBE import SpiderForCROWDCUBE\n<mask token>\n\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n\n def tearDown(self):\n self.spider.quitDriver()\n \"\"\"\n #測試註冊帳號\n def test_registerAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_registerAccount\")\n self.spider.registerAccount()\n \n #測試登入帳號\n def test_loginAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_loginAccount\")\n self.spider.loginAccount()\n \n #測試抓取 companies page\n def test_downloadCompaniesPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompaniesPage\")\n self.spider.downloadCompaniesPage()\n \"\"\"\n\n def test_downloadCompanyPage(self):\n logging.info('SpiderForCROWDCUBETest.test_downloadCompanyPage')\n self.spider.downloadCompanyPage()\n\n\nif __name__ == '__main__':\n unittest.main(exit=False)\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (C) 2015, MuChu Hsu\nContributed by Muchu Hsu (muchu1983@gmail.com)\nThis file is part of BSD license\n\n<https://opensource.org/licenses/BSD-3-Clause>\n\"\"\"\nimport unittest\nimport logging\nfrom cameo.spiderForCROWDCUBE import SpiderForCROWDCUBE\n\"\"\"\n測試 抓取 CROWDCUBE\n\"\"\"\n\nclass SpiderForCROWDCUBETest(unittest.TestCase):\n\n #準備\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.spider = SpiderForCROWDCUBE()\n self.spider.initDriver()\n \n #收尾\n def tearDown(self):\n self.spider.quitDriver()\n \"\"\"\n #測試註冊帳號\n def test_registerAccount(self):\n 
logging.info(\"SpiderForCROWDCUBETest.test_registerAccount\")\n self.spider.registerAccount()\n \n #測試登入帳號\n def test_loginAccount(self):\n logging.info(\"SpiderForCROWDCUBETest.test_loginAccount\")\n self.spider.loginAccount()\n \n #測試抓取 companies page\n def test_downloadCompaniesPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompaniesPage\")\n self.spider.downloadCompaniesPage()\n \"\"\"\n #測試抓取 company page\n def test_downloadCompanyPage(self):\n logging.info(\"SpiderForCROWDCUBETest.test_downloadCompanyPage\")\n self.spider.downloadCompanyPage()\n \n#測試開始\nif __name__ == \"__main__\":\n unittest.main(exit=False)\n\n\n", "step-ids": [ 3, 4, 6, 7, 8 ] }
<|reserved_special_token_0|> class Script(BaseScript): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Script(BaseScript): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def execute_cli(self, **kwargs): v = self.cli('show version', cached=True) for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM', self.rx_ver), ('ESCOM', self.rx_ver1)]: match = ver.search(v) if match: break else: raise NotImplementedError if platform == 'ESCOM L': hw_match = self.rx_hw_escom_l.search(v) return {'vendor': 'Iskratel', 'version': match.group('version'), 'platform': platform, 'image': hw_match.group('image'), 'attributes': {'Boot PROM': hw_match.group('bootprom'), 'HW version': hw_match.group('hardware'), 'Serial Number': hw_match.group('serial')}} r = {'vendor': 'Iskratel', 'version': match.group('version'), 'attributes': {'Boot PROM': match.group('bootprom'), 'HW version': match.group('hardware')}} v = self.cli('show system', cached=True) match = self.rx_platform.search(v) if not match: match = self.rx_platform1.search(v) r['platform'] = match.group('platform') v = self.cli('show system id', cached=True) match = self.rx_serial.search(v) if match: r['attributes']['Serial Number'] = match.group('serial') return r <|reserved_special_token_1|> <|reserved_special_token_0|> class Script(BaseScript): name = 'Iskratel.ESCOM.get_version' cache = True interface = IGetVersion rx_ver = re.compile( '^\\s*SW version\\s+(?P<version>\\S+).*\\n^\\s*Boot version\\s+(?P<bootprom>\\S+).*\\n^\\s*HW version\\s+(?P<hardware>\\S+).*\\n' , re.MULTILINE) rx_ver1 = re.compile( '^\\s+1\\s+(?P<version>\\S+)\\s+(?P<bootprom>\\S+)\\s+(?P<hardware>\\S+)' , re.MULTILINE) rx_ver_escom_l = re.compile( 'SI3000 ESCOM L Series Software,\\s*Version\\s(?P<version>\\S+) Build (?P<version_build>\\S+),' , re.MULTILINE) rx_hw_escom_l = re.compile( 'ROM:\\s*System Bootstrap, Version\\s*(?P<bootprom>\\S+),\\s*hardware version:\\s*(?P<hardware>\\S+)\\nSerial num:(?P<serial>\\S+), ID num:(?P<id_number>\\S+)\\nSystem image file is \\"(?P<image>\\S+)\\"' , re.MULTILINE) rx_platform = re.compile('^\\s*System Description:\\s+(?P<platform>.+)\\n', re.MULTILINE) rx_platform1 = re.compile('^\\s+1\\s+(?P<platform>\\S+)\\s*\\n', re. 
MULTILINE)
    rx_serial = re.compile('^\\s*Serial number : (?P<serial>\\S+)')


# ---------------------------------------------------------------------
# Iskratel.ESCOM.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------

# Python modules
import re

# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion


class Script(BaseScript):
    name = "Iskratel.ESCOM.get_version"
    cache = True
    interface = IGetVersion

    rx_ver = re.compile(
        r"^\s*SW version\s+(?P<version>\S+).*\n"
        r"^\s*Boot version\s+(?P<bootprom>\S+).*\n"
        r"^\s*HW version\s+(?P<hardware>\S+).*\n",
        re.MULTILINE,
    )
    rx_ver1 = re.compile(
        r"^\s+1\s+(?P<version>\S+)\s+(?P<bootprom>\S+)\s+(?P<hardware>\S+)", re.MULTILINE
    )
    rx_ver_escom_l = re.compile(
        r"SI3000 ESCOM L Series Software,\s*Version\s(?P<version>\S+) Build (?P<version_build>\S+),",
        re.MULTILINE,
    )
    rx_hw_escom_l = re.compile(
        r"ROM:\s*System Bootstrap, Version\s*(?P<bootprom>\S+),\s*hardware version:\s*(?P<hardware>\S+)\n"
        r"Serial num:(?P<serial>\S+), ID num:(?P<id_number>\S+)\n"
        r"System image file is \"(?P<image>\S+)\"",
        re.MULTILINE,
    )
    rx_platform = re.compile(r"^\s*System Description:\s+(?P<platform>.+)\n", re.MULTILINE)
    rx_platform1 = re.compile(r"^\s+1\s+(?P<platform>\S+)\s*\n", re.MULTILINE)
    rx_serial = re.compile(r"^\s*Serial number : (?P<serial>\S+)")

    def execute_cli(self, **kwargs):
        v = self.cli("show version", cached=True)
        for platform, ver in [
            ("ESCOM L", self.rx_ver_escom_l),
            ("ESCOM", self.rx_ver),
            ("ESCOM", self.rx_ver1),
        ]:
            match = ver.search(v)
            if match:
                break
        else:
            raise NotImplementedError
        if platform == "ESCOM L":
            hw_match = self.rx_hw_escom_l.search(v)
            return {
                "vendor": "Iskratel",
                "version": match.group("version"),
                "platform": platform,
                "image": hw_match.group("image"),
                "attributes": {
                    "Boot PROM": hw_match.group("bootprom"),
                    "HW version": hw_match.group("hardware"),
                    "Serial Number": hw_match.group("serial"),
                },
            }
        r = {
            "vendor": "Iskratel",
            "version": match.group("version"),
            "attributes": {
                "Boot PROM": match.group("bootprom"),
                "HW version": match.group("hardware"),
            },
        }
        v = self.cli("show system", cached=True)
        match = self.rx_platform.search(v)
        if not match:
            match = self.rx_platform1.search(v)
        r["platform"] = match.group("platform")
        v = self.cli("show system id", cached=True)
        match = self.rx_serial.search(v)
        if match:
            r["attributes"]["Serial Number"] = match.group("serial")
        return r
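# A quick, standalone sanity check for rx_ver. The sample banner below is a
# hypothetical approximation of ESCOM "show version" output, not captured
# device output, so treat its exact layout as an assumption.
import re

rx_ver_demo = re.compile(
    r"^\s*SW version\s+(?P<version>\S+).*\n"
    r"^\s*Boot version\s+(?P<bootprom>\S+).*\n"
    r"^\s*HW version\s+(?P<hardware>\S+).*\n",
    re.MULTILINE,
)

sample = (
    "SW version    1.1.48 ( date 01-Feb-2016 time 14:21:02 )\n"
    "Boot version  1.0.2 ( date 01-Oct-2013 time 15:00:00 )\n"
    "HW version    01.01.00\n"
)

m = rx_ver_demo.search(sample)
assert m is not None
print(m.group("version"), m.group("bootprom"), m.group("hardware"))
# -> 1.1.48 1.0.2 01.01.00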
flexible
{ "blob_id": "40b3c403f99044eb61740d62eda15ddd08b0f739", "index": 1980, "step-1": "<mask token>\n\n\nclass Script(BaseScript):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Script(BaseScript):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def execute_cli(self, **kwargs):\n v = self.cli('show version', cached=True)\n for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM',\n self.rx_ver), ('ESCOM', self.rx_ver1)]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == 'ESCOM L':\n hw_match = self.rx_hw_escom_l.search(v)\n return {'vendor': 'Iskratel', 'version': match.group('version'),\n 'platform': platform, 'image': hw_match.group('image'),\n 'attributes': {'Boot PROM': hw_match.group('bootprom'),\n 'HW version': hw_match.group('hardware'), 'Serial Number':\n hw_match.group('serial')}}\n r = {'vendor': 'Iskratel', 'version': match.group('version'),\n 'attributes': {'Boot PROM': match.group('bootprom'),\n 'HW version': match.group('hardware')}}\n v = self.cli('show system', cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r['platform'] = match.group('platform')\n v = self.cli('show system id', cached=True)\n match = self.rx_serial.search(v)\n if match:\n r['attributes']['Serial Number'] = match.group('serial')\n return r\n", "step-3": "<mask token>\n\n\nclass Script(BaseScript):\n name = 'Iskratel.ESCOM.get_version'\n cache = True\n interface = IGetVersion\n rx_ver = re.compile(\n '^\\\\s*SW version\\\\s+(?P<version>\\\\S+).*\\\\n^\\\\s*Boot version\\\\s+(?P<bootprom>\\\\S+).*\\\\n^\\\\s*HW version\\\\s+(?P<hardware>\\\\S+).*\\\\n'\n , re.MULTILINE)\n rx_ver1 = re.compile(\n '^\\\\s+1\\\\s+(?P<version>\\\\S+)\\\\s+(?P<bootprom>\\\\S+)\\\\s+(?P<hardware>\\\\S+)'\n , re.MULTILINE)\n rx_ver_escom_l = re.compile(\n 'SI3000 ESCOM L Series Software,\\\\s*Version\\\\s(?P<version>\\\\S+) Build (?P<version_build>\\\\S+),'\n , re.MULTILINE)\n rx_hw_escom_l = re.compile(\n 'ROM:\\\\s*System Bootstrap, Version\\\\s*(?P<bootprom>\\\\S+),\\\\s*hardware version:\\\\s*(?P<hardware>\\\\S+)\\\\nSerial num:(?P<serial>\\\\S+), ID num:(?P<id_number>\\\\S+)\\\\nSystem image file is \\\\\"(?P<image>\\\\S+)\\\\\"'\n , re.MULTILINE)\n rx_platform = re.compile('^\\\\s*System Description:\\\\s+(?P<platform>.+)\\\\n',\n re.MULTILINE)\n rx_platform1 = re.compile('^\\\\s+1\\\\s+(?P<platform>\\\\S+)\\\\s*\\\\n', re.\n MULTILINE)\n rx_serial = re.compile('^\\\\s*Serial number : (?P<serial>\\\\S+)')\n\n def execute_cli(self, **kwargs):\n v = self.cli('show version', cached=True)\n for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM',\n self.rx_ver), ('ESCOM', self.rx_ver1)]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == 'ESCOM L':\n hw_match = self.rx_hw_escom_l.search(v)\n return {'vendor': 'Iskratel', 'version': match.group('version'),\n 'platform': platform, 'image': hw_match.group('image'),\n 'attributes': {'Boot PROM': hw_match.group('bootprom'),\n 'HW version': hw_match.group('hardware'), 'Serial Number':\n hw_match.group('serial')}}\n r = {'vendor': 'Iskratel', 'version': match.group('version'),\n 'attributes': {'Boot PROM': match.group('bootprom'),\n 'HW version': 
match.group('hardware')}}\n v = self.cli('show system', cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r['platform'] = match.group('platform')\n v = self.cli('show system id', cached=True)\n match = self.rx_serial.search(v)\n if match:\n r['attributes']['Serial Number'] = match.group('serial')\n return r\n", "step-4": "import re\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetversion import IGetVersion\n\n\nclass Script(BaseScript):\n name = 'Iskratel.ESCOM.get_version'\n cache = True\n interface = IGetVersion\n rx_ver = re.compile(\n '^\\\\s*SW version\\\\s+(?P<version>\\\\S+).*\\\\n^\\\\s*Boot version\\\\s+(?P<bootprom>\\\\S+).*\\\\n^\\\\s*HW version\\\\s+(?P<hardware>\\\\S+).*\\\\n'\n , re.MULTILINE)\n rx_ver1 = re.compile(\n '^\\\\s+1\\\\s+(?P<version>\\\\S+)\\\\s+(?P<bootprom>\\\\S+)\\\\s+(?P<hardware>\\\\S+)'\n , re.MULTILINE)\n rx_ver_escom_l = re.compile(\n 'SI3000 ESCOM L Series Software,\\\\s*Version\\\\s(?P<version>\\\\S+) Build (?P<version_build>\\\\S+),'\n , re.MULTILINE)\n rx_hw_escom_l = re.compile(\n 'ROM:\\\\s*System Bootstrap, Version\\\\s*(?P<bootprom>\\\\S+),\\\\s*hardware version:\\\\s*(?P<hardware>\\\\S+)\\\\nSerial num:(?P<serial>\\\\S+), ID num:(?P<id_number>\\\\S+)\\\\nSystem image file is \\\\\"(?P<image>\\\\S+)\\\\\"'\n , re.MULTILINE)\n rx_platform = re.compile('^\\\\s*System Description:\\\\s+(?P<platform>.+)\\\\n',\n re.MULTILINE)\n rx_platform1 = re.compile('^\\\\s+1\\\\s+(?P<platform>\\\\S+)\\\\s*\\\\n', re.\n MULTILINE)\n rx_serial = re.compile('^\\\\s*Serial number : (?P<serial>\\\\S+)')\n\n def execute_cli(self, **kwargs):\n v = self.cli('show version', cached=True)\n for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM',\n self.rx_ver), ('ESCOM', self.rx_ver1)]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == 'ESCOM L':\n hw_match = self.rx_hw_escom_l.search(v)\n return {'vendor': 'Iskratel', 'version': match.group('version'),\n 'platform': platform, 'image': hw_match.group('image'),\n 'attributes': {'Boot PROM': hw_match.group('bootprom'),\n 'HW version': hw_match.group('hardware'), 'Serial Number':\n hw_match.group('serial')}}\n r = {'vendor': 'Iskratel', 'version': match.group('version'),\n 'attributes': {'Boot PROM': match.group('bootprom'),\n 'HW version': match.group('hardware')}}\n v = self.cli('show system', cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r['platform'] = match.group('platform')\n v = self.cli('show system id', cached=True)\n match = self.rx_serial.search(v)\n if match:\n r['attributes']['Serial Number'] = match.group('serial')\n return r\n", "step-5": "# ---------------------------------------------------------------------\n# Iskratel.ESCOM.get_version\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2018 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetversion import IGetVersion\n\n\nclass Script(BaseScript):\n name = \"Iskratel.ESCOM.get_version\"\n cache = True\n interface = IGetVersion\n\n rx_ver = re.compile(\n r\"^\\s*SW version\\s+(?P<version>\\S+).*\\n\"\n r\"^\\s*Boot version\\s+(?P<bootprom>\\S+).*\\n\"\n r\"^\\s*HW version\\s+(?P<hardware>\\S+).*\\n\",\n re.MULTILINE,\n )\n rx_ver1 = 
re.compile(\n r\"^\\s+1\\s+(?P<version>\\S+)\\s+(?P<bootprom>\\S+)\\s+(?P<hardware>\\S+)\", re.MULTILINE\n )\n rx_ver_escom_l = re.compile(\n r\"SI3000 ESCOM L Series Software,\\s*Version\\s(?P<version>\\S+) Build (?P<version_build>\\S+),\",\n re.MULTILINE,\n )\n rx_hw_escom_l = re.compile(\n r\"ROM:\\s*System Bootstrap, Version\\s*(?P<bootprom>\\S+),\\s*hardware version:\\s*(?P<hardware>\\S+)\\n\"\n r\"Serial num:(?P<serial>\\S+), ID num:(?P<id_number>\\S+)\\n\"\n r\"System image file is \\\"(?P<image>\\S+)\\\"\",\n re.MULTILINE,\n )\n rx_platform = re.compile(r\"^\\s*System Description:\\s+(?P<platform>.+)\\n\", re.MULTILINE)\n rx_platform1 = re.compile(r\"^\\s+1\\s+(?P<platform>\\S+)\\s*\\n\", re.MULTILINE)\n rx_serial = re.compile(r\"^\\s*Serial number : (?P<serial>\\S+)\")\n\n def execute_cli(self, **kwargs):\n v = self.cli(\"show version\", cached=True)\n for platform, ver in [\n (\"ESCOM L\", self.rx_ver_escom_l),\n (\"ESCOM\", self.rx_ver),\n (\"ESCOM\", self.rx_ver1),\n ]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == \"ESCOM L\":\n hw_match = self.rx_hw_escom_l.search(v)\n return {\n \"vendor\": \"Iskratel\",\n \"version\": match.group(\"version\"),\n \"platform\": platform,\n \"image\": hw_match.group(\"image\"),\n \"attributes\": {\n \"Boot PROM\": hw_match.group(\"bootprom\"),\n \"HW version\": hw_match.group(\"hardware\"),\n \"Serial Number\": hw_match.group(\"serial\"),\n },\n }\n r = {\n \"vendor\": \"Iskratel\",\n \"version\": match.group(\"version\"),\n \"attributes\": {\n \"Boot PROM\": match.group(\"bootprom\"),\n \"HW version\": match.group(\"hardware\"),\n },\n }\n v = self.cli(\"show system\", cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r[\"platform\"] = match.group(\"platform\")\n v = self.cli(\"show system id\", cached=True)\n match = self.rx_serial.search(v)\n if match:\n r[\"attributes\"][\"Serial Number\"] = match.group(\"serial\")\n return r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.conf.urls import url, include

from . import views

explore_patterns = [
    url(r'^$', views.explore),
    url(r'^(?P<model_type>\w+)/$', views.get_by_model_type),
    url(r'^(?P<model_type>\w+)/(?P<id>\w+)/$', views.get_by_model_id),
    url(r'^(?P<model_type>\w+)/(?P<id>\w+)/download$', views.download_media_file),
]

export_patterns = [
    url(r'^$', views.download),
    url(r'^(?P<model_type>\w+)/(?P<id>\w+)/(?P<format>\w+)/$', views.export_by_model_id),
]

urlpatterns = [
    url(r'^about/', views.about),
    url(r'^help/', views.help),
    url(r'^search/', views.search, name='search'),
    url(r'^explore$', views.explore),
    url(r'^explore/', include(explore_patterns)),
    url(r'^export$', views.download),
    url(r'^export/', include(export_patterns)),
    url(r'^$', views.home),
]
# url(r'^logout$', views.logout, name='logout'),
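# For reference, a minimal sketch of the view signatures these patterns imply.
# The bodies are placeholders and everything here is an assumption drawn only
# from the regex group names above, not from the project's real views module.
from django.http import HttpResponse


def explore(request):
    return HttpResponse("explore index")


def get_by_model_type(request, model_type):
    # e.g. /explore/ticket/ resolves here with model_type='ticket'
    return HttpResponse(model_type)


def get_by_model_id(request, model_type, id):
    # e.g. /explore/ticket/42/ resolves here with model_type='ticket', id='42'
    return HttpResponse("%s/%s" % (model_type, id))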
flexible
{ "blob_id": "89078ddd7dad3a2727b66566457b9ac173abe607", "index": 8506, "step-1": "<mask token>\n", "step-2": "<mask token>\nexplore_patterns = [url('^$', views.explore), url('^(?P<model_type>\\\\w+)/$',\n views.get_by_model_type), url('^(?P<model_type>\\\\w+)/(?P<id>\\\\w+)/$',\n views.get_by_model_id), url(\n '^(?P<model_type>\\\\w+)/(?P<id>\\\\w+)/download$', views.download_media_file)]\nexport_patterns = [url('^$', views.download), url(\n '^(?P<model_type>\\\\w+)/(?P<id>\\\\w+)/(?P<format>\\\\w+)/$', views.\n export_by_model_id)]\nurlpatterns = [url('^about/', views.about), url('^help/', views.help), url(\n '^search/', views.search, name='search'), url('^explore$', views.\n explore), url('^explore/', include(explore_patterns)), url('^export$',\n views.download), url('^export/', include(export_patterns)), url('^$',\n views.home)]\n", "step-3": "from django.conf.urls import url, include\nfrom . import views\nexplore_patterns = [url('^$', views.explore), url('^(?P<model_type>\\\\w+)/$',\n views.get_by_model_type), url('^(?P<model_type>\\\\w+)/(?P<id>\\\\w+)/$',\n views.get_by_model_id), url(\n '^(?P<model_type>\\\\w+)/(?P<id>\\\\w+)/download$', views.download_media_file)]\nexport_patterns = [url('^$', views.download), url(\n '^(?P<model_type>\\\\w+)/(?P<id>\\\\w+)/(?P<format>\\\\w+)/$', views.\n export_by_model_id)]\nurlpatterns = [url('^about/', views.about), url('^help/', views.help), url(\n '^search/', views.search, name='search'), url('^explore$', views.\n explore), url('^explore/', include(explore_patterns)), url('^export$',\n views.download), url('^export/', include(export_patterns)), url('^$',\n views.home)]\n", "step-4": "from django.conf.urls import url, include\n\nfrom . import views\n\nexplore_patterns = [\n url(r'^$', views.explore),\n url(r'^(?P<model_type>\\w+)/$', views.get_by_model_type),\n url(r'^(?P<model_type>\\w+)/(?P<id>\\w+)/$', views.get_by_model_id),\n url(r'^(?P<model_type>\\w+)/(?P<id>\\w+)/download$', views.download_media_file),\n]\n\nexport_patterns = [\n url(r'^$', views.download),\n url(r'^(?P<model_type>\\w+)/(?P<id>\\w+)/(?P<format>\\w+)/$', views.export_by_model_id),\n]\n\nurlpatterns = [\n\turl(r'^about/', views.about),\n url(r'^help/', views.help),\n url(r'^search/', views.search, name='search'),\n url(r'^explore$', views.explore),\n url(r'^explore/', include(explore_patterns)),\n url(r'^export$', views.download),\n url(r'^export/', include(export_patterns)),\n url(r'^$', views.home),\n]\n\t#url(r'^logout$', views.logout, name='logout'),\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import torch import random from itertools import product from Struct import Action class Agent(object): """the agent""" def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5, traceDecay=0.3): # action set possibleChangesPerMagnet = (1e-2, 1e-3, 0, -1e-2, -1e-3) # possibleChangesPerMagnet = (0, -1e-2, -1e-3) self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)) # probability to act greedy self.epsilon = epsilon # Q-function self.q = q # memory self.shortMemory = [] self.memorySize = 1 self.traceDecay = traceDecay self.replayMemory = [] self.replayMemorySize = int(1e4) # learning self.discount = discount self.learningRate = learningRate return def takeAction(self, state): """take an action according to current state""" # go greedy or not? if random.uniform(0, 1) < self.epsilon: # greedy selection # find best action allActions = torch.stack( tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet)) evaluation = self.q.evaluateBunch(allActions) action = Action(state, self.actionSet[evaluation.argmax()]) return action else: # random selection return Action(state, random.choice(self.actionSet)) def bestAction(self, state, isTensor=False): """returns best action and it's rating""" # get value for every possible action if not isTensor: allActions = torch.stack( tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet)) else: allActions = torch.stack( tuple(torch.cat((state, changes)) for changes in self.actionSet)) allValues = self.q.evaluateBunch(allActions) # determine index of highest value bestIndex = allValues.argmax() # get best action bestAction = allActions[bestIndex, -2:] return bestAction, allValues[bestIndex] def remember(self, transition): """place a transition in the memory""" # reduce eligibility for old memories for memory in self.shortMemory: memory *= self.traceDecay * self.discount # add new memory if len(self.shortMemory) < self.memorySize: self.shortMemory.append(transition) else: del self.shortMemory[0] self.shortMemory.append(transition) return def getShortMemory(self): return self.shortMemory def wipeShortMemory(self): """wipe all recent experience""" self.shortMemory = [] return def learn(self, netInput, labels): """train Q-function""" self.q.trainer.applyUpdate(netInput, labels) return def getSarsaLambda(self, shortMemory): """generate TD lambda update targets from short memory""" # get temporal difference error delta = shortMemory[-1].reward + self.discount * self.q.evaluate( self.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(shortMemory[-1].action) # states netInput = [] for memory in shortMemory: netInput.append( torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes))) netInput = torch.stack(netInput) # updates for every state in memory with respect to its eligibility labels = [] for memory in shortMemory: labels.append(self.learningRate * delta * memory.action.eligibility) labels = torch.tensor(labels) labels = torch.unsqueeze(labels, 1) return netInput, labels def getDQN(self, shortMemory): """generates DQN update targets from short memory""" # sampleSize = self.memorySize // 5 # use only with traces (= short memory larger than 5 entries) sampleSize = 1 if len(shortMemory) < sampleSize: sample = shortMemory else: sample = random.sample(shortMemory, sampleSize) # states netInput = [] for memory in sample: netInput.append( torch.cat((memory.action.state.strengths, 
memory.action.state.focus, memory.action.changes)))

        netInput = torch.stack(netInput)

        # updates for Q-values
        labels = []
        for memory in sample:
            if memory.nextState is None:
                # terminal transition: the reward is the full learning target
                labels.append(memory.reward)
            else:
                # non-terminal: move the current estimate towards the
                # bootstrapped target (reward plus discounted max Q of the
                # next state)
                currentQ = self.q.evaluate(memory.action)
                labels.append(currentQ + self.learningRate * (
                        memory.reward + self.discount * self.q.evaluateMax(memory.nextState, self.actionSet) - currentQ))

        labels = torch.tensor(labels)
        labels = torch.unsqueeze(labels, 1)

        return netInput.float(), labels.float()  # casting added due to occasional occurrence of LongTensors <- why?
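# A minimal smoke test for Agent.bestAction, runnable with the class above in
# scope. DummyQ and the 2-element state tensor are illustrative assumptions,
# not part of the original project, which supplies its own Q-function.
class DummyQ:
    def evaluateBunch(self, batch):
        # prefer the candidate with the smallest proposed magnet change
        return -batch[:, -2:].abs().sum(dim=1)


agent = Agent(DummyQ(), epsilon=1.0)
state = torch.zeros(2)  # stands in for concatenated (strengths, focus)
action, value = agent.bestAction(state, isTensor=True)
print(action, value)  # the zero change (0., 0.) scores highest here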
normal
{ "blob_id": "63edbbbad9561ddae005d2b5e22a089819dc34c5", "index": 1821, "step-1": "<mask token>\n\n\nclass Agent(object):\n <mask token>\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n <mask token>\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n <mask token>\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n", "step-2": "<mask token>\n\n\nclass Agent(object):\n <mask token>\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, 
possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n if random.uniform(0, 1) < self.epsilon:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n <mask token>\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n", "step-3": "<mask token>\n\n\nclass Agent(object):\n <mask token>\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in 
product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n if random.uniform(0, 1) < self.epsilon:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n\n def wipeShortMemory(self):\n \"\"\"wipe all recent experience\"\"\"\n self.shortMemory = []\n return\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n", "step-4": "import torch\nimport random\nfrom itertools import product\nfrom Struct import Action\n\n\nclass Agent(object):\n \"\"\"the agent\"\"\"\n\n def __init__(self, q, epsilon=0.8, discount=0.9, 
learningRate=0.5,\n traceDecay=0.3):\n possibleChangesPerMagnet = 0.01, 0.001, 0, -0.01, -0.001\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for \n x, y in product(possibleChangesPerMagnet, possibleChangesPerMagnet)\n )\n self.epsilon = epsilon\n self.q = q\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n self.replayMemory = []\n self.replayMemorySize = int(10000.0)\n self.discount = discount\n self.learningRate = learningRate\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n if random.uniform(0, 1) < self.epsilon:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n if not isTensor:\n allActions = torch.stack(tuple(torch.cat((state.strengths,\n state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(tuple(torch.cat((state, changes)) for\n changes in self.actionSet))\n allValues = self.q.evaluateBunch(allActions)\n bestIndex = allValues.argmax()\n bestAction = allActions[bestIndex, -2:]\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n return\n\n def getShortMemory(self):\n return self.shortMemory\n\n def wipeShortMemory(self):\n \"\"\"wipe all recent experience\"\"\"\n self.shortMemory = []\n return\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(self\n .takeAction(shortMemory[-1].nextState)) - self.q.evaluate(\n shortMemory[-1].action)\n netInput = []\n for memory in shortMemory:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility\n )\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n sampleSize = 1\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n netInput = []\n for memory in sample:\n netInput.append(torch.cat((memory.action.state.strengths,\n memory.action.state.focus, memory.action.changes)))\n netInput = torch.stack(netInput)\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (self.discount *\n self.q.evaluateMax(memory.nextState, self.actionSet) -\n currentQ))\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n return netInput.float(), labels.float()\n", "step-5": "import 
torch\nimport random\nfrom itertools import product\n\nfrom Struct import Action\n\n\nclass Agent(object):\n \"\"\"the agent\"\"\"\n\n def __init__(self, q, epsilon=0.8, discount=0.9, learningRate=0.5, traceDecay=0.3):\n # action set\n possibleChangesPerMagnet = (1e-2, 1e-3, 0, -1e-2, -1e-3)\n # possibleChangesPerMagnet = (0, -1e-2, -1e-3)\n self.actionSet = tuple(torch.tensor((x, y), dtype=torch.float) for x, y in\n product(possibleChangesPerMagnet, possibleChangesPerMagnet))\n\n # probability to act greedy\n self.epsilon = epsilon\n\n # Q-function\n self.q = q\n\n # memory\n self.shortMemory = []\n self.memorySize = 1\n self.traceDecay = traceDecay\n\n self.replayMemory = []\n self.replayMemorySize = int(1e4)\n\n # learning\n self.discount = discount\n self.learningRate = learningRate\n\n return\n\n def takeAction(self, state):\n \"\"\"take an action according to current state\"\"\"\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))\n\n def bestAction(self, state, isTensor=False):\n \"\"\"returns best action and it's rating\"\"\"\n # get value for every possible action\n if not isTensor:\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n else:\n allActions = torch.stack(\n tuple(torch.cat((state, changes)) for changes in self.actionSet))\n\n allValues = self.q.evaluateBunch(allActions)\n\n # determine index of highest value\n bestIndex = allValues.argmax()\n\n # get best action\n bestAction = allActions[bestIndex, -2:]\n\n return bestAction, allValues[bestIndex]\n\n def remember(self, transition):\n \"\"\"place a transition in the memory\"\"\"\n # reduce eligibility for old memories\n for memory in self.shortMemory:\n memory *= self.traceDecay * self.discount\n\n # add new memory\n if len(self.shortMemory) < self.memorySize:\n self.shortMemory.append(transition)\n else:\n del self.shortMemory[0]\n self.shortMemory.append(transition)\n\n return\n\n def getShortMemory(self):\n return self.shortMemory\n\n def wipeShortMemory(self):\n \"\"\"wipe all recent experience\"\"\"\n self.shortMemory = []\n return\n\n def learn(self, netInput, labels):\n \"\"\"train Q-function\"\"\"\n self.q.trainer.applyUpdate(netInput, labels)\n return\n\n def getSarsaLambda(self, shortMemory):\n \"\"\"generate TD lambda update targets from short memory\"\"\"\n # get temporal difference error\n delta = shortMemory[-1].reward + self.discount * self.q.evaluate(\n self.takeAction(shortMemory[-1].nextState)) - self.q.evaluate(shortMemory[-1].action)\n\n # states\n netInput = []\n for memory in shortMemory:\n netInput.append(\n torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))\n\n netInput = torch.stack(netInput)\n\n # updates for every state in memory with respect to its eligibility\n labels = []\n for memory in shortMemory:\n labels.append(self.learningRate * delta * memory.action.eligibility)\n\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n\n return netInput, labels\n\n def getDQN(self, shortMemory):\n \"\"\"generates DQN update targets from short memory\"\"\"\n # sampleSize = self.memorySize // 5 # use 
only with traces (= short memory larger than 5 entries)\n sampleSize = 1\n\n if len(shortMemory) < sampleSize:\n sample = shortMemory\n else:\n sample = random.sample(shortMemory, sampleSize)\n\n # states\n netInput = []\n for memory in sample:\n netInput.append(\n torch.cat((memory.action.state.strengths, memory.action.state.focus, memory.action.changes)))\n\n netInput = torch.stack(netInput)\n\n # updates for Q-values\n labels = []\n for memory in sample:\n if memory.nextState:\n labels.append(memory.reward)\n else:\n currentQ = self.q.evaluate(memory.action)\n labels.append(currentQ + self.learningRate * (\n self.discount * self.q.evaluateMax(memory.nextState, self.actionSet) - currentQ))\n\n labels = torch.tensor(labels)\n labels = torch.unsqueeze(labels, 1)\n\n return netInput.float(), labels.float() # casting added due to occasional occurrence of LongTensors <- why?\n", "step-ids": [ 8, 9, 10, 12, 13 ] }
[ 8, 9, 10, 12, 13 ]
__version__ = '0.2.11' # This list defines all the modules that will be loaded if a user invokes # from climLab import * # totally out of date! #__all__ = ["constants", "thermo", "orbital_table", # "long_orbital_table", "insolation", "ebm", # "column", "convadj"] #from climlab import radiation # this should ensure that we can still import constants.py as climlab.constants from climlab.utils import constants from climlab.utils import thermo, legendre # some more useful shorcuts #from climlab.model import ebm, column from climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel from climlab.model.ebm import EBM, EBM_annual, EBM_seasonal from climlab.domain import domain from climlab.domain.field import Field, global_mean from climlab.domain.axis import Axis from climlab.process.process import Process, process_like, get_axes from climlab.process.time_dependent_process import TimeDependentProcess from climlab.process.implicit import ImplicitProcess from climlab.process.diagnostic import DiagnosticProcess from climlab.process.energy_budget import EnergyBudget
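# A minimal usage sketch for this package layout. climlab's EBM and
# integrate_years are real interfaces, but treat the exact behaviour shown
# here as an assumption for this particular 0.2.x snapshot.
import climlab

model = climlab.EBM()                  # annual-mean energy balance model
model.integrate_years(5)               # step the model forward five years
print(climlab.global_mean(model.Ts))   # area-weighted mean surface temperature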
normal
{ "blob_id": "8251a9c798b3cdc2f374d0a0406ccfaa11b7c5e3", "index": 5699, "step-1": "<mask token>\n", "step-2": "__version__ = '0.2.11'\n<mask token>\n", "step-3": "__version__ = '0.2.11'\nfrom climlab.utils import constants\nfrom climlab.utils import thermo, legendre\nfrom climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel\nfrom climlab.model.ebm import EBM, EBM_annual, EBM_seasonal\nfrom climlab.domain import domain\nfrom climlab.domain.field import Field, global_mean\nfrom climlab.domain.axis import Axis\nfrom climlab.process.process import Process, process_like, get_axes\nfrom climlab.process.time_dependent_process import TimeDependentProcess\nfrom climlab.process.implicit import ImplicitProcess\nfrom climlab.process.diagnostic import DiagnosticProcess\nfrom climlab.process.energy_budget import EnergyBudget\n", "step-4": "__version__ = '0.2.11'\n\n# This list defines all the modules that will be loaded if a user invokes\n# from climLab import *\n\n# totally out of date!\n\n#__all__ = [\"constants\", \"thermo\", \"orbital_table\",\n# \"long_orbital_table\", \"insolation\", \"ebm\",\n# \"column\", \"convadj\"]\n\n#from climlab import radiation\n# this should ensure that we can still import constants.py as climlab.constants \nfrom climlab.utils import constants\nfrom climlab.utils import thermo, legendre\n# some more useful shorcuts\n#from climlab.model import ebm, column\nfrom climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel\nfrom climlab.model.ebm import EBM, EBM_annual, EBM_seasonal\nfrom climlab.domain import domain\nfrom climlab.domain.field import Field, global_mean\nfrom climlab.domain.axis import Axis\nfrom climlab.process.process import Process, process_like, get_axes\nfrom climlab.process.time_dependent_process import TimeDependentProcess\nfrom climlab.process.implicit import ImplicitProcess\nfrom climlab.process.diagnostic import DiagnosticProcess\nfrom climlab.process.energy_budget import EnergyBudget\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import tkinter as tk
import random
from tkinter import messagebox as mb

n = 16


class Application(tk.Frame):
    playButtons = [0] * n

    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.grid(sticky='NEWS')
        self.createWidgets()

    def show_win(self):
        msg = "YOU WIN!"
        mb.showinfo("Information", msg)
        self.makePlayButtons()

    def move(self, num):
        def move2(self=self, num=num):
            index = self.numbers.index(num)
            r = index // 4
            c = index % 4
            if r - 1 >= 0 and self.numbers[4 * (r - 1) + c] == 0:
                self.numbers[4 * (r - 1) + c], self.numbers[index] = self.numbers[index], self.numbers[4 * (r - 1) + c]
                self.playButtons[index].grid(row=r - 1 + 1, column=c)
                self.playButtons[4 * (r - 1) + c], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * (r - 1) + c]
            elif r + 1 <= 3 and self.numbers[4 * (r + 1) + c] == 0:
                self.numbers[4 * (r + 1) + c], self.numbers[index] = self.numbers[index], self.numbers[4 * (r + 1) + c]
                self.playButtons[index].grid(row=r + 1 + 1, column=c)
                self.playButtons[4 * (r + 1) + c], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * (r + 1) + c]
            elif c + 1 <= 3 and self.numbers[4 * r + c + 1] == 0:
                self.numbers[4 * r + c + 1], self.numbers[index] = self.numbers[index], self.numbers[4 * r + c + 1]
                self.playButtons[index].grid(row=r + 1, column=c + 1)
                self.playButtons[4 * r + c + 1], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * r + c + 1]
            elif c - 1 >= 0 and self.numbers[4 * r + c - 1] == 0:
                self.numbers[4 * r + c - 1], self.numbers[index] = self.numbers[index], self.numbers[4 * r + c - 1]
                self.playButtons[index].grid(row=r + 1, column=c - 1)
                self.playButtons[4 * r + c - 1], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * r + c - 1]
            if self.numbers == [i % 16 for i in range(1, 17)]:
                self.show_win()
        return move2

    def makePlayButtons(self):
        for but in self.playButtons:
            if but != 0:
                but.destroy()
        self.numbers = [i for i in range(n)]
        random.shuffle(self.numbers)
        self.playButtons = [0] * n
        for i in range(n):
            if self.numbers[i] == 0:
                continue
            self.playButtons[i] = tk.Button(self, text=str(self.numbers[i]), command=self.move(self.numbers[i]))
            self.playButtons[i].grid(row=i // 4 + 1, column=i % 4, sticky='SENW')

    def createWidgets(self):
        self.quitButton = tk.Button(self, text='Exit', command=self.quit)
        self.newButton = tk.Button(self, text='New', command=self.makePlayButtons)
        self.makePlayButtons()
        self.quitButton.grid(row=0, column=0)
        self.newButton.grid(row=0, column=2)
        self.master.columnconfigure(0, weight=1)
        self.master.rowconfigure(0, weight=1)
        for r in range(1, 5):
            self.rowconfigure(r, weight=1)
        for c in range(4):
            self.columnconfigure(c, weight=1)
        # self.show_win()


app = Application()
app.master.title('15 puzzle')
app.mainloop()
flexible
{ "blob_id": "f29bc0263f8bb1d59ab2442347727d9d3233ec77", "index": 9893, "step-1": "<mask token>\n\n\nclass Application(tk.Frame):\n <mask token>\n <mask token>\n\n def show_win(self):\n msg = 'YOU WIN!'\n mb.showinfo('Information', msg)\n self.makePlayButtons()\n\n def move(self, num):\n\n def move2(self=self, num=num):\n index = self.numbers.index(num)\n r = index // 4\n c = index % 4\n if r - 1 >= 0 and self.numbers[4 * (r - 1) + c] == 0:\n self.numbers[4 * (r - 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r - 1) + c]\n self.playButtons[index].grid(row=r - 1 + 1, column=c)\n self.playButtons[4 * (r - 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r - \n 1) + c]\n elif r + 1 <= 3 and self.numbers[4 * (r + 1) + c] == 0:\n self.numbers[4 * (r + 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r + 1) + c]\n self.playButtons[index].grid(row=r + 1 + 1, column=c)\n self.playButtons[4 * (r + 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r + \n 1) + c]\n elif c + 1 <= 3 and self.numbers[4 * r + c + 1] == 0:\n self.numbers[4 * r + c + 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c + 1]\n self.playButtons[index].grid(row=r + 1, column=c + 1)\n self.playButtons[4 * r + c + 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c + 1\n ]\n elif c - 1 >= 0 and self.numbers[4 * r + c - 1] == 0:\n self.numbers[4 * r + c - 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c - 1]\n self.playButtons[index].grid(row=r + 1, column=c - 1)\n self.playButtons[4 * r + c - 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c - 1\n ]\n if self.numbers == [(i % 16) for i in range(1, 17)]:\n self.show_win()\n return move2\n\n def makePlayButtons(self):\n for but in self.playButtons:\n if but != 0:\n but.destroy()\n self.numbers = [i for i in range(n)]\n random.shuffle(self.numbers)\n self.playButtons = [0] * n\n for i in range(n):\n if self.numbers[i] == 0:\n continue\n self.playButtons[i] = tk.Button(self, text=str(self.numbers[i]),\n command=self.move(self.numbers[i]))\n self.playButtons[i].grid(row=i // 4 + 1, column=i % 4, sticky=\n 'SENW')\n\n def createWidgets(self):\n self.quitButton = tk.Button(self, text='Exit', command=self.quit)\n self.newButton = tk.Button(self, text='New', command=self.\n makePlayButtons)\n self.makePlayButtons()\n self.quitButton.grid(row=0, column=0)\n self.newButton.grid(row=0, column=2)\n self.master.columnconfigure(0, weight=1)\n self.master.rowconfigure(0, weight=1)\n for r in range(1, 5):\n self.rowconfigure(r, weight=1)\n for c in range(4):\n self.columnconfigure(c, weight=1)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Application(tk.Frame):\n playButtons = [0] * n\n\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.grid(sticky='NEWS')\n self.createWidgets()\n\n def show_win(self):\n msg = 'YOU WIN!'\n mb.showinfo('Information', msg)\n self.makePlayButtons()\n\n def move(self, num):\n\n def move2(self=self, num=num):\n index = self.numbers.index(num)\n r = index // 4\n c = index % 4\n if r - 1 >= 0 and self.numbers[4 * (r - 1) + c] == 0:\n self.numbers[4 * (r - 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r - 1) + c]\n self.playButtons[index].grid(row=r - 1 + 1, column=c)\n self.playButtons[4 * (r - 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * 
(r - \n 1) + c]\n elif r + 1 <= 3 and self.numbers[4 * (r + 1) + c] == 0:\n self.numbers[4 * (r + 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r + 1) + c]\n self.playButtons[index].grid(row=r + 1 + 1, column=c)\n self.playButtons[4 * (r + 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r + \n 1) + c]\n elif c + 1 <= 3 and self.numbers[4 * r + c + 1] == 0:\n self.numbers[4 * r + c + 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c + 1]\n self.playButtons[index].grid(row=r + 1, column=c + 1)\n self.playButtons[4 * r + c + 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c + 1\n ]\n elif c - 1 >= 0 and self.numbers[4 * r + c - 1] == 0:\n self.numbers[4 * r + c - 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c - 1]\n self.playButtons[index].grid(row=r + 1, column=c - 1)\n self.playButtons[4 * r + c - 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c - 1\n ]\n if self.numbers == [(i % 16) for i in range(1, 17)]:\n self.show_win()\n return move2\n\n def makePlayButtons(self):\n for but in self.playButtons:\n if but != 0:\n but.destroy()\n self.numbers = [i for i in range(n)]\n random.shuffle(self.numbers)\n self.playButtons = [0] * n\n for i in range(n):\n if self.numbers[i] == 0:\n continue\n self.playButtons[i] = tk.Button(self, text=str(self.numbers[i]),\n command=self.move(self.numbers[i]))\n self.playButtons[i].grid(row=i // 4 + 1, column=i % 4, sticky=\n 'SENW')\n\n def createWidgets(self):\n self.quitButton = tk.Button(self, text='Exit', command=self.quit)\n self.newButton = tk.Button(self, text='New', command=self.\n makePlayButtons)\n self.makePlayButtons()\n self.quitButton.grid(row=0, column=0)\n self.newButton.grid(row=0, column=2)\n self.master.columnconfigure(0, weight=1)\n self.master.rowconfigure(0, weight=1)\n for r in range(1, 5):\n self.rowconfigure(r, weight=1)\n for c in range(4):\n self.columnconfigure(c, weight=1)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Application(tk.Frame):\n playButtons = [0] * n\n\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.grid(sticky='NEWS')\n self.createWidgets()\n\n def show_win(self):\n msg = 'YOU WIN!'\n mb.showinfo('Information', msg)\n self.makePlayButtons()\n\n def move(self, num):\n\n def move2(self=self, num=num):\n index = self.numbers.index(num)\n r = index // 4\n c = index % 4\n if r - 1 >= 0 and self.numbers[4 * (r - 1) + c] == 0:\n self.numbers[4 * (r - 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r - 1) + c]\n self.playButtons[index].grid(row=r - 1 + 1, column=c)\n self.playButtons[4 * (r - 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r - \n 1) + c]\n elif r + 1 <= 3 and self.numbers[4 * (r + 1) + c] == 0:\n self.numbers[4 * (r + 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r + 1) + c]\n self.playButtons[index].grid(row=r + 1 + 1, column=c)\n self.playButtons[4 * (r + 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r + \n 1) + c]\n elif c + 1 <= 3 and self.numbers[4 * r + c + 1] == 0:\n self.numbers[4 * r + c + 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c + 1]\n self.playButtons[index].grid(row=r + 1, column=c + 1)\n self.playButtons[4 * r + c + 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c + 1\n ]\n elif c - 1 
>= 0 and self.numbers[4 * r + c - 1] == 0:\n self.numbers[4 * r + c - 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c - 1]\n self.playButtons[index].grid(row=r + 1, column=c - 1)\n self.playButtons[4 * r + c - 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c - 1\n ]\n if self.numbers == [(i % 16) for i in range(1, 17)]:\n self.show_win()\n return move2\n\n def makePlayButtons(self):\n for but in self.playButtons:\n if but != 0:\n but.destroy()\n self.numbers = [i for i in range(n)]\n random.shuffle(self.numbers)\n self.playButtons = [0] * n\n for i in range(n):\n if self.numbers[i] == 0:\n continue\n self.playButtons[i] = tk.Button(self, text=str(self.numbers[i]),\n command=self.move(self.numbers[i]))\n self.playButtons[i].grid(row=i // 4 + 1, column=i % 4, sticky=\n 'SENW')\n\n def createWidgets(self):\n self.quitButton = tk.Button(self, text='Exit', command=self.quit)\n self.newButton = tk.Button(self, text='New', command=self.\n makePlayButtons)\n self.makePlayButtons()\n self.quitButton.grid(row=0, column=0)\n self.newButton.grid(row=0, column=2)\n self.master.columnconfigure(0, weight=1)\n self.master.rowconfigure(0, weight=1)\n for r in range(1, 5):\n self.rowconfigure(r, weight=1)\n for c in range(4):\n self.columnconfigure(c, weight=1)\n\n\n<mask token>\napp.master.title('15 puzzle')\napp.mainloop()\n", "step-4": "<mask token>\nn = 16\n\n\nclass Application(tk.Frame):\n playButtons = [0] * n\n\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.grid(sticky='NEWS')\n self.createWidgets()\n\n def show_win(self):\n msg = 'YOU WIN!'\n mb.showinfo('Information', msg)\n self.makePlayButtons()\n\n def move(self, num):\n\n def move2(self=self, num=num):\n index = self.numbers.index(num)\n r = index // 4\n c = index % 4\n if r - 1 >= 0 and self.numbers[4 * (r - 1) + c] == 0:\n self.numbers[4 * (r - 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r - 1) + c]\n self.playButtons[index].grid(row=r - 1 + 1, column=c)\n self.playButtons[4 * (r - 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r - \n 1) + c]\n elif r + 1 <= 3 and self.numbers[4 * (r + 1) + c] == 0:\n self.numbers[4 * (r + 1) + c], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * (r + 1) + c]\n self.playButtons[index].grid(row=r + 1 + 1, column=c)\n self.playButtons[4 * (r + 1) + c], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * (r + \n 1) + c]\n elif c + 1 <= 3 and self.numbers[4 * r + c + 1] == 0:\n self.numbers[4 * r + c + 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c + 1]\n self.playButtons[index].grid(row=r + 1, column=c + 1)\n self.playButtons[4 * r + c + 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c + 1\n ]\n elif c - 1 >= 0 and self.numbers[4 * r + c - 1] == 0:\n self.numbers[4 * r + c - 1], self.numbers[index\n ] = self.numbers[index], self.numbers[4 * r + c - 1]\n self.playButtons[index].grid(row=r + 1, column=c - 1)\n self.playButtons[4 * r + c - 1], self.playButtons[index\n ] = self.playButtons[index], self.playButtons[4 * r + c - 1\n ]\n if self.numbers == [(i % 16) for i in range(1, 17)]:\n self.show_win()\n return move2\n\n def makePlayButtons(self):\n for but in self.playButtons:\n if but != 0:\n but.destroy()\n self.numbers = [i for i in range(n)]\n random.shuffle(self.numbers)\n self.playButtons = [0] * n\n for i in range(n):\n if self.numbers[i] == 0:\n 
continue\n self.playButtons[i] = tk.Button(self, text=str(self.numbers[i]),\n command=self.move(self.numbers[i]))\n self.playButtons[i].grid(row=i // 4 + 1, column=i % 4, sticky=\n 'SENW')\n\n def createWidgets(self):\n self.quitButton = tk.Button(self, text='Exit', command=self.quit)\n self.newButton = tk.Button(self, text='New', command=self.\n makePlayButtons)\n self.makePlayButtons()\n self.quitButton.grid(row=0, column=0)\n self.newButton.grid(row=0, column=2)\n self.master.columnconfigure(0, weight=1)\n self.master.rowconfigure(0, weight=1)\n for r in range(1, 5):\n self.rowconfigure(r, weight=1)\n for c in range(4):\n self.columnconfigure(c, weight=1)\n\n\napp = Application()\napp.master.title('15 puzzle')\napp.mainloop()\n", "step-5": "import tkinter as tk\nimport random\nfrom tkinter import messagebox as mb\n\nn = 16\n\n\nclass Application(tk.Frame):\n playButtons = [0] * n\n def __init__(self, master=None):\n tk.Frame.__init__(self, master)\n self.grid(sticky='NEWS')\n self.createWidgets()\n\n def show_win(self):\n msg = \"YOU WIN!\"\n mb.showinfo(\"Information\", msg)\n self.makePlayButtons()\n\n def move(self, num): \n def move2(self=self, num=num):\n index = self.numbers.index(num)\n r = index // 4\n c = index % 4\n if r - 1 >= 0 and self.numbers[4 * (r - 1) + c] == 0:\n self.numbers[4 * (r - 1) + c], self.numbers[index] = self.numbers[index], self.numbers[4 * (r - 1) + c]\n self.playButtons[index].grid(row=r - 1 + 1, column=c)\n self.playButtons[4 * (r - 1) + c], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * (r - 1) + c]\n\n elif r + 1 <= 3 and self.numbers[4 * (r + 1) + c] == 0:\n self.numbers[4 * (r + 1) + c], self.numbers[index] = self.numbers[index], self.numbers[4 * (r + 1) + c]\n self.playButtons[index].grid(row=r + 1 + 1, column=c)\n self.playButtons[4 * (r + 1) + c], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * (r + 1) + c]\n elif c + 1 <= 3 and self.numbers[4 * r + c + 1] == 0:\n self.numbers[4 * r + c + 1], self.numbers[index] = self.numbers[index], self.numbers[4 * r + c + 1]\n self.playButtons[index].grid(row=r + 1, column=c + 1)\n self.playButtons[4 * r + c + 1], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * r + c + 1]\n elif c - 1 >= 0 and self.numbers[4 * r + c - 1] == 0:\n self.numbers[4 * r + c - 1], self.numbers[index] = self.numbers[index], self.numbers[4 * r + c - 1]\n self.playButtons[index].grid(row=r + 1, column=c - 1)\n self.playButtons[4 * r + c - 1], self.playButtons[index] = self.playButtons[index], self.playButtons[4 * r + c - 1]\n if self.numbers == [i % 16 for i in range(1, 17)]:\n self.show_win()\n return move2 \n\n def makePlayButtons(self):\n for but in self.playButtons:\n if but != 0:\n but.destroy()\n\n self.numbers = [i for i in range(n)]\n random.shuffle(self.numbers)\n\n self.playButtons = [0] * n\n for i in range(n):\n if self.numbers[i] == 0:\n continue\n self.playButtons[i] = tk.Button(self, text=str(self.numbers[i]), command=self.move(self.numbers[i]))\n self.playButtons[i].grid(row=i // 4 + 1, column=i % 4, sticky='SENW')\n\n\n\n def createWidgets(self):\n \n\n self.quitButton = tk.Button(self, text='Exit', command=self.quit)\n self.newButton = tk.Button(self, text='New', command=self.makePlayButtons)\n\n self.makePlayButtons()\n\n self.quitButton.grid(row=0, column=0)\n self.newButton.grid(row=0, column=2)\n\n self.master.columnconfigure(0, weight = 1)\n self.master.rowconfigure(0, weight = 1)\n\n for r in range(1, 5):\n self.rowconfigure(r, weight = 1)\n 
for c in range(4):\n self.columnconfigure(c, weight = 1)\n #self.show_win()\n\napp = Application()\napp.master.title('15 puzzle')\napp.mainloop()", "step-ids": [ 5, 7, 8, 9, 11 ] }
[ 5, 7, 8, 9, 11 ]
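The four `elif` branches in the 15-puzzle example above repeat one swap pattern per neighbour of the clicked tile. A minimal standalone sketch of that adjacency logic, factored into a single loop over the four offsets; the function name `try_move` and the bare flat-list board are illustrative assumptions, not names from the original blob:

# Sketch only: condenses the four copy-pasted elif branches from the
# example's move() closure. `board` is a flat list of 16 ints with 0
# marking the empty cell, mirroring self.numbers in the tkinter code.
def try_move(board, num):
    index = board.index(num)
    r, c = divmod(index, 4)
    for dr, dc in ((-1, 0), (1, 0), (0, 1), (0, -1)):
        nr, nc = r + dr, c + dc
        if 0 <= nr <= 3 and 0 <= nc <= 3 and board[4 * nr + nc] == 0:
            j = 4 * nr + nc
            board[j], board[index] = board[index], board[j]
            return True  # the tile slid into the empty cell
    return False  # no empty neighbour; nothing moves


if __name__ == '__main__':
    board = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 15]
    assert try_move(board, 15)
    # solved-state test taken from the example's own win check
    assert board == [i % 16 for i in range(1, 17)]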
<|reserved_special_token_0|>


def download_install_deb(package_path, package_url):
    download_file(package_path, package_url)
    install_debian_package_binary(package_path)
    remove_file(package_path)


<|reserved_special_token_0|>


def write_file(path, data, mode='w'):
    if os.path.exists(path) and mode is not 'a':
        pathBAK = path + '.bak'
        os.rename(path, pathBAK)
    with open(path, mode) as handle:
        handle.write(data)


def remove_file(path, replace_with_backup=False):
    backup_path = path + '.bak'
    shutil.copy(path, backup_path)
    if os.path.exists(path):
        os.remove(path)
    if replace_with_backup and os.path.exists(backup_path):
        os.rename(path, backup_path)


def slugify(*args, **kwargs):
    return PipSlugify(*args, **kwargs)


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def download_install_deb(package_path, package_url):
    download_file(package_path, package_url)
    install_debian_package_binary(package_path)
    remove_file(package_path)


def install_apt_packages(packages):
    if not isinstance(packages, basestring):
        packages = ' '.join(packages)
    os.system('sudo apt-get install -y {packages}'.format(packages=packages))


<|reserved_special_token_0|>


def write_file(path, data, mode='w'):
    if os.path.exists(path) and mode is not 'a':
        pathBAK = path + '.bak'
        os.rename(path, pathBAK)
    with open(path, mode) as handle:
        handle.write(data)


def remove_file(path, replace_with_backup=False):
    backup_path = path + '.bak'
    shutil.copy(path, backup_path)
    if os.path.exists(path):
        os.remove(path)
    if replace_with_backup and os.path.exists(backup_path):
        os.rename(path, backup_path)


def slugify(*args, **kwargs):
    return PipSlugify(*args, **kwargs)


def copy_and_backup_original(from_path, to_path):
    if os.path.exists(to_path):
        rename = to_path + '.bak'
        os.rename(to_path, rename)
    shutil.copytree(from_path, to_path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def download_install_deb(package_path, package_url):
    download_file(package_path, package_url)
    install_debian_package_binary(package_path)
    remove_file(package_path)


def install_apt_packages(packages):
    if not isinstance(packages, basestring):
        packages = ' '.join(packages)
    os.system('sudo apt-get install -y {packages}'.format(packages=packages))


def download_file(target_path, source_url):
    try:
        r = requests.get(source_url, stream=True)
        with open(target_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return True
    except:
        return False


def write_file(path, data, mode='w'):
    if os.path.exists(path) and mode is not 'a':
        pathBAK = path + '.bak'
        os.rename(path, pathBAK)
    with open(path, mode) as handle:
        handle.write(data)


def remove_file(path, replace_with_backup=False):
    backup_path = path + '.bak'
    shutil.copy(path, backup_path)
    if os.path.exists(path):
        os.remove(path)
    if replace_with_backup and os.path.exists(backup_path):
        os.rename(path, backup_path)


def slugify(*args, **kwargs):
    return PipSlugify(*args, **kwargs)


def copy_and_backup_original(from_path, to_path):
    if os.path.exists(to_path):
        rename = to_path + '.bak'
        os.rename(to_path, rename)
    shutil.copytree(from_path, to_path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def install_debian_package_binary(package_path):
    os.system('sudo dpkg -i {package_path}'.format(package_path=package_path))
    os.system('sudo apt-get install -f')


def download_install_deb(package_path, package_url):
    download_file(package_path, package_url)
    install_debian_package_binary(package_path)
    remove_file(package_path)


def install_apt_packages(packages):
    if not isinstance(packages, basestring):
        packages = ' '.join(packages)
    os.system('sudo apt-get install -y {packages}'.format(packages=packages))


def download_file(target_path, source_url):
    try:
        r = requests.get(source_url, stream=True)
        with open(target_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return True
    except:
        return False


def write_file(path, data, mode='w'):
    if os.path.exists(path) and mode is not 'a':
        pathBAK = path + '.bak'
        os.rename(path, pathBAK)
    with open(path, mode) as handle:
        handle.write(data)


def remove_file(path, replace_with_backup=False):
    backup_path = path + '.bak'
    shutil.copy(path, backup_path)
    if os.path.exists(path):
        os.remove(path)
    if replace_with_backup and os.path.exists(backup_path):
        os.rename(path, backup_path)


def slugify(*args, **kwargs):
    return PipSlugify(*args, **kwargs)


def copy_and_backup_original(from_path, to_path):
    if os.path.exists(to_path):
        rename = to_path + '.bak'
        os.rename(to_path, rename)
    shutil.copytree(from_path, to_path)
<|reserved_special_token_1|>
import requests
import os
from slugify import slugify as PipSlugify
import shutil

# will install any valid .deb package
def install_debian_package_binary(package_path):
    os.system("sudo dpkg -i {package_path}".format(
        package_path=package_path
    ))
    os.system("sudo apt-get install -f")

def download_install_deb(package_path, package_url):
    download_file(package_path, package_url)
    install_debian_package_binary(package_path)
    remove_file(package_path)

def install_apt_packages(packages):
    if not isinstance(packages, basestring):
        packages = " ".join(packages)
    os.system("sudo apt-get install -y {packages}".format(packages=packages))

# download a file available at source_url to target_path on the file system.
def download_file(target_path, source_url):
    try:
        # NOTE the stream=True parameter
        r = requests.get(source_url, stream=True)
        with open(target_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        return True
    # TODO: better exception handling
    except:
        return False

def write_file(path, data, mode='w'):
    if os.path.exists(path) and mode is not 'a':
        pathBAK = path + ".bak"
        os.rename(path, pathBAK)
    with open(path, mode) as handle:
        handle.write(data)

def remove_file(path, replace_with_backup=False):
    # make a backup
    backup_path = path + ".bak"
    shutil.copy(path, backup_path)
    # remove the file
    if os.path.exists(path):
        os.remove(path)
    # replace existing with backup
    if replace_with_backup and os.path.exists(backup_path):
        os.rename(path, backup_path)

# abstract the library choice/implementation of slugify from the installer
def slugify(*args, **kwargs):
    return PipSlugify(*args, **kwargs)

def copy_and_backup_original(from_path, to_path):
    if os.path.exists(to_path):
        rename = to_path + ".bak"
        os.rename(to_path, rename)
    shutil.copytree(from_path, to_path)
flexible
{ "blob_id": "f546eb40ee8a7308ded62532731561029e5ec335", "index": 7870, "step-1": "<mask token>\n\n\ndef download_install_deb(package_path, package_url):\n download_file(package_path, package_url)\n install_debian_package_binary(package_path)\n remove_file(package_path)\n\n\n<mask token>\n\n\ndef write_file(path, data, mode='w'):\n if os.path.exists(path) and mode is not 'a':\n pathBAK = path + '.bak'\n os.rename(path, pathBAK)\n with open(path, mode) as handle:\n handle.write(data)\n\n\ndef remove_file(path, replace_with_backup=False):\n backup_path = path + '.bak'\n shutil.copy(path, backup_path)\n if os.path.exists(path):\n os.remove(path)\n if replace_with_backup and os.path.exists(backup_path):\n os.rename(path, backup_path)\n\n\ndef slugify(*args, **kwargs):\n return PipSlugify(*args, **kwargs)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef download_install_deb(package_path, package_url):\n download_file(package_path, package_url)\n install_debian_package_binary(package_path)\n remove_file(package_path)\n\n\ndef install_apt_packages(packages):\n if not isinstance(packages, basestring):\n packages = ' '.join(packages)\n os.system('sudo apt-get install -y {packages}'.format(packages=packages))\n\n\n<mask token>\n\n\ndef write_file(path, data, mode='w'):\n if os.path.exists(path) and mode is not 'a':\n pathBAK = path + '.bak'\n os.rename(path, pathBAK)\n with open(path, mode) as handle:\n handle.write(data)\n\n\ndef remove_file(path, replace_with_backup=False):\n backup_path = path + '.bak'\n shutil.copy(path, backup_path)\n if os.path.exists(path):\n os.remove(path)\n if replace_with_backup and os.path.exists(backup_path):\n os.rename(path, backup_path)\n\n\ndef slugify(*args, **kwargs):\n return PipSlugify(*args, **kwargs)\n\n\ndef copy_and_backup_original(from_path, to_path):\n if os.path.exists(to_path):\n rename = to_path + '.bak'\n os.rename(to_path, rename)\n shutil.copytree(from_path, to_path)\n", "step-3": "<mask token>\n\n\ndef download_install_deb(package_path, package_url):\n download_file(package_path, package_url)\n install_debian_package_binary(package_path)\n remove_file(package_path)\n\n\ndef install_apt_packages(packages):\n if not isinstance(packages, basestring):\n packages = ' '.join(packages)\n os.system('sudo apt-get install -y {packages}'.format(packages=packages))\n\n\ndef download_file(target_path, source_url):\n try:\n r = requests.get(source_url, stream=True)\n with open(target_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n return True\n except:\n return False\n\n\ndef write_file(path, data, mode='w'):\n if os.path.exists(path) and mode is not 'a':\n pathBAK = path + '.bak'\n os.rename(path, pathBAK)\n with open(path, mode) as handle:\n handle.write(data)\n\n\ndef remove_file(path, replace_with_backup=False):\n backup_path = path + '.bak'\n shutil.copy(path, backup_path)\n if os.path.exists(path):\n os.remove(path)\n if replace_with_backup and os.path.exists(backup_path):\n os.rename(path, backup_path)\n\n\ndef slugify(*args, **kwargs):\n return PipSlugify(*args, **kwargs)\n\n\ndef copy_and_backup_original(from_path, to_path):\n if os.path.exists(to_path):\n rename = to_path + '.bak'\n os.rename(to_path, rename)\n shutil.copytree(from_path, to_path)\n", "step-4": "<mask token>\n\n\ndef install_debian_package_binary(package_path):\n os.system('sudo dpkg -i {package_path}'.format(package_path=package_path))\n os.system('sudo apt-get install -f')\n\n\ndef download_install_deb(package_path, 
package_url):\n download_file(package_path, package_url)\n install_debian_package_binary(package_path)\n remove_file(package_path)\n\n\ndef install_apt_packages(packages):\n if not isinstance(packages, basestring):\n packages = ' '.join(packages)\n os.system('sudo apt-get install -y {packages}'.format(packages=packages))\n\n\ndef download_file(target_path, source_url):\n try:\n r = requests.get(source_url, stream=True)\n with open(target_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n f.flush()\n return True\n except:\n return False\n\n\ndef write_file(path, data, mode='w'):\n if os.path.exists(path) and mode is not 'a':\n pathBAK = path + '.bak'\n os.rename(path, pathBAK)\n with open(path, mode) as handle:\n handle.write(data)\n\n\ndef remove_file(path, replace_with_backup=False):\n backup_path = path + '.bak'\n shutil.copy(path, backup_path)\n if os.path.exists(path):\n os.remove(path)\n if replace_with_backup and os.path.exists(backup_path):\n os.rename(path, backup_path)\n\n\ndef slugify(*args, **kwargs):\n return PipSlugify(*args, **kwargs)\n\n\ndef copy_and_backup_original(from_path, to_path):\n if os.path.exists(to_path):\n rename = to_path + '.bak'\n os.rename(to_path, rename)\n shutil.copytree(from_path, to_path)\n", "step-5": "import requests\nimport os\nfrom slugify import slugify as PipSlugify\nimport shutil\n\n# will install any valid .deb package\ndef install_debian_package_binary(package_path):\n os.system(\"sudo dpkg -i {package_path}\".format(\n package_path=package_path\n ))\n os.system(\"sudo apt-get install -f\")\n\ndef download_install_deb(package_path, package_url):\n download_file(package_path, package_url)\n install_debian_package_binary(package_path)\n remove_file(package_path)\n\ndef install_apt_packages(packages):\n if not isinstance(packages, basestring):\n packages = \" \".join(packages)\n os.system(\"sudo apt-get install -y {packages}\".format(packages=packages))\n\n# download a file available at source_url to target_path on the file system.\ndef download_file(target_path, source_url):\n try:\n # NOTE the stream=True parameter\n r = requests.get(source_url, stream=True)\n with open(target_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n f.flush()\n return True\n # TODO: better exception handling\n except:\n return False\n\n\ndef write_file(path, data, mode='w'):\n if os.path.exists(path) and mode is not 'a':\n pathBAK = path + \".bak\"\n os.rename(path, pathBAK)\n with open(path, mode) as handle:\n handle.write(data)\n\ndef remove_file(path, replace_with_backup=False):\n # make a backup\n backup_path = path + \".bak\"\n shutil.copy(path, backup_path)\n # remove the file\n if os.path.exists(path):\n os.remove(path)\n # replace existing with backup\n if replace_with_backup and os.path.exists(backup_path):\n os.rename(path, backup_path)\n\n# abstract the library choice/implementation of slugify from the installer\ndef slugify(*args, **kwargs):\n return PipSlugify(*args, **kwargs)\n\ndef copy_and_backup_original(from_path, to_path):\n if os.path.exists(to_path):\n rename = to_path + \".bak\"\n os.rename(to_path, rename)\n shutil.copytree(from_path, to_path)\n ", "step-ids": [ 4, 6, 7, 8, 10 ] }
[ 4, 6, 7, 8, 10 ]
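Two spots in the Debian-package helper module above deserve a flag without silently rewriting the archived blob: `write_file` guards on `mode is not 'a'`, an identity test that only passes by CPython string-interning accident (recent interpreters emit a SyntaxWarning for it), and `remove_file`'s restore branch calls `os.rename(path, backup_path)` after `path` has already been deleted, so the restore can never succeed. `install_apt_packages` also tests `isinstance(packages, basestring)`, a name that exists only on Python 2. A corrected Python 3 sketch of the two buggy helpers, under the assumption that the intent was to move the backup back over the original path:

import os
import shutil


def write_file(path, data, mode='w'):
    # Equality, not identity: the original's `mode is not 'a'` compares
    # object identity and only works because of string interning.
    if os.path.exists(path) and mode != 'a':
        os.rename(path, path + '.bak')
    with open(path, mode) as handle:
        handle.write(data)


def remove_file(path, replace_with_backup=False):
    backup_path = path + '.bak'
    shutil.copy(path, backup_path)  # make a backup first
    if os.path.exists(path):
        os.remove(path)
    if replace_with_backup and os.path.exists(backup_path):
        # Assumed intent: put the backup back where the file was. The
        # original renamed `path` (just removed) onto `backup_path`,
        # which would raise FileNotFoundError.
        os.rename(backup_path, path)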
<|reserved_special_token_0|> class MainHandler(BaseHandler): <|reserved_special_token_0|> def get(self): """Returns the root endpoint of the API.""" self.write( '{"error": "cryptochat-server main page, please refer to /api/message/new or /api/message/updates"}' ) class MessageNewHandler(BaseHandler): """Post a new message to the chat room.""" async def post(self): """ Add a new message to the server. """ await self.handle_request(self.messages_new_api, 1) class MessageUpdatesHandler(BaseHandler): """Long-polling request for new messages. Waits until new messages are available before returning anything. """ async def post(self): """Checks for the new message updates, waits until new messages are available.""" await self.handle_request(self.messages_updates_api, 1) class UsersHandler(BaseHandler): """Handler class providing /users POST requests.""" async def post(self): """Adds a new user to the database.""" await self.handle_request(self.users_api, 1) async def get(self): """Returns details of particular user.""" await self.handle_request(self.users_api, 1) class ChatsHandler(BaseHandler): """Handler providing /chats POST requests""" async def post(self): """Adds a new chat to the database.""" await self.handle_request(self.chats_api, 1) async def get(self): """Returns details of particular chat.""" await self.handle_request(self.chats_api, 1) class ChatsUserHandler(BaseHandler): """Handler providing /chats/user GET requests""" async def get(self): """Returns chats for the given user.""" await self.handle_request(self.chats_user_api, 1) class ContactsNewHandler(BaseHandler): """Handler providing /contacts POST requests""" async def post(self): """Adds a new contact to the database""" await self.handle_request(self.contacts_new_api, 1) async def get(self): """Returns details of particular contact.""" await self.handle_request(self.contacts_new_api, 1) class Application(tornado.web.Application): """ main cryptochat application class """ def __init__(self): handlers = [('/', MainHandler), ('/api/message/new', MessageNewHandler), ('/api/message/updates', MessageUpdatesHandler), ('/api/users', UsersHandler), ( '/api/chats', ChatsHandler), ('/api/chats/user', ChatsUserHandler), ('/api/contacts', ContactsNewHandler)] tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseHandler(tornado.web.RequestHandler): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> async def handle_request(self, api_endpoint, api_version): """Takes care of validation of input and execution of POST and GET methods.""" code = 400 data = self.get_post_data() request_method = self.request.method.lower() if data: try: res = await getattr(api_endpoint, 'process_' + request_method)( api_version, data) code = 200 except ValidationError as validerr: if validerr.absolute_path: res = '%s : %s' % (validerr.absolute_path.pop(), validerr.message) else: res = '%s' % validerr.message LOGGER.error('ValidationError: %s', res) raise tornado.web.HTTPError(reason=res) except ValueError as valuerr: res = str(valuerr) LOGGER.error('ValueError: %s', res) raise tornado.web.HTTPError(reason=res) except DatabaseError as dberr: err_id = dberr.__hash__() res = 
str(dberr.reason) LOGGER.error(res) LOGGER.info('Input data for <%s>: %s', err_id, data) raise dberr except Exception as err: err_id = err.__hash__() res = ( 'Internal server error <%s>:please include this error id in bug report.' % err_id) code = 500 LOGGER.exception(res) LOGGER.info('Input data for <%s>: %s', err_id, data) raise tornado.web.HTTPError(reason=res) else: res = 'Error: malformed input JSON.' LOGGER.error(res) raise tornado.web.HTTPError(reason=res) self.set_status(code) self.write(res) <|reserved_special_token_0|> class MainHandler(BaseHandler): """Handler for the API root.""" def get(self): """Returns the root endpoint of the API.""" self.write( '{"error": "cryptochat-server main page, please refer to /api/message/new or /api/message/updates"}' ) class MessageNewHandler(BaseHandler): """Post a new message to the chat room.""" async def post(self): """ Add a new message to the server. """ await self.handle_request(self.messages_new_api, 1) class MessageUpdatesHandler(BaseHandler): """Long-polling request for new messages. Waits until new messages are available before returning anything. """ async def post(self): """Checks for the new message updates, waits until new messages are available.""" await self.handle_request(self.messages_updates_api, 1) class UsersHandler(BaseHandler): """Handler class providing /users POST requests.""" async def post(self): """Adds a new user to the database.""" await self.handle_request(self.users_api, 1) async def get(self): """Returns details of particular user.""" await self.handle_request(self.users_api, 1) class ChatsHandler(BaseHandler): """Handler providing /chats POST requests""" async def post(self): """Adds a new chat to the database.""" await self.handle_request(self.chats_api, 1) async def get(self): """Returns details of particular chat.""" await self.handle_request(self.chats_api, 1) class ChatsUserHandler(BaseHandler): """Handler providing /chats/user GET requests""" async def get(self): """Returns chats for the given user.""" await self.handle_request(self.chats_user_api, 1) class ContactsNewHandler(BaseHandler): """Handler providing /contacts POST requests""" async def post(self): """Adds a new contact to the database""" await self.handle_request(self.contacts_new_api, 1) async def get(self): """Returns details of particular contact.""" await self.handle_request(self.contacts_new_api, 1) class Application(tornado.web.Application): """ main cryptochat application class """ def __init__(self): handlers = [('/', MainHandler), ('/api/message/new', MessageNewHandler), ('/api/message/updates', MessageUpdatesHandler), ('/api/users', UsersHandler), ( '/api/chats', ChatsHandler), ('/api/chats/user', ChatsUserHandler), ('/api/contacts', ContactsNewHandler)] tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseHandler(tornado.web.RequestHandler): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def data_received(self, chunk): pass def set_default_headers(self): self.set_header('Access-Control-Allow-Origin', '*') self.set_header('Access-Control-Allow-Headers', 'Content-Type') <|reserved_special_token_0|> def get_post_data(self): """extract input JSON from POST request""" json_data = '' if self.request.files: json_data = self.request.files['file'][0]['body'] elif 
self.request.body: json_data = self.request.body try: data = json.loads(json_data) except ValueError: data = None return data async def handle_request(self, api_endpoint, api_version): """Takes care of validation of input and execution of POST and GET methods.""" code = 400 data = self.get_post_data() request_method = self.request.method.lower() if data: try: res = await getattr(api_endpoint, 'process_' + request_method)( api_version, data) code = 200 except ValidationError as validerr: if validerr.absolute_path: res = '%s : %s' % (validerr.absolute_path.pop(), validerr.message) else: res = '%s' % validerr.message LOGGER.error('ValidationError: %s', res) raise tornado.web.HTTPError(reason=res) except ValueError as valuerr: res = str(valuerr) LOGGER.error('ValueError: %s', res) raise tornado.web.HTTPError(reason=res) except DatabaseError as dberr: err_id = dberr.__hash__() res = str(dberr.reason) LOGGER.error(res) LOGGER.info('Input data for <%s>: %s', err_id, data) raise dberr except Exception as err: err_id = err.__hash__() res = ( 'Internal server error <%s>:please include this error id in bug report.' % err_id) code = 500 LOGGER.exception(res) LOGGER.info('Input data for <%s>: %s', err_id, data) raise tornado.web.HTTPError(reason=res) else: res = 'Error: malformed input JSON.' LOGGER.error(res) raise tornado.web.HTTPError(reason=res) self.set_status(code) self.write(res) <|reserved_special_token_0|> class MainHandler(BaseHandler): """Handler for the API root.""" def get(self): """Returns the root endpoint of the API.""" self.write( '{"error": "cryptochat-server main page, please refer to /api/message/new or /api/message/updates"}' ) class MessageNewHandler(BaseHandler): """Post a new message to the chat room.""" async def post(self): """ Add a new message to the server. """ await self.handle_request(self.messages_new_api, 1) class MessageUpdatesHandler(BaseHandler): """Long-polling request for new messages. Waits until new messages are available before returning anything. 
""" async def post(self): """Checks for the new message updates, waits until new messages are available.""" await self.handle_request(self.messages_updates_api, 1) class UsersHandler(BaseHandler): """Handler class providing /users POST requests.""" async def post(self): """Adds a new user to the database.""" await self.handle_request(self.users_api, 1) async def get(self): """Returns details of particular user.""" await self.handle_request(self.users_api, 1) class ChatsHandler(BaseHandler): """Handler providing /chats POST requests""" async def post(self): """Adds a new chat to the database.""" await self.handle_request(self.chats_api, 1) async def get(self): """Returns details of particular chat.""" await self.handle_request(self.chats_api, 1) class ChatsUserHandler(BaseHandler): """Handler providing /chats/user GET requests""" async def get(self): """Returns chats for the given user.""" await self.handle_request(self.chats_user_api, 1) class ContactsNewHandler(BaseHandler): """Handler providing /contacts POST requests""" async def post(self): """Adds a new contact to the database""" await self.handle_request(self.contacts_new_api, 1) async def get(self): """Returns details of particular contact.""" await self.handle_request(self.contacts_new_api, 1) class Application(tornado.web.Application): """ main cryptochat application class """ def __init__(self): handlers = [('/', MainHandler), ('/api/message/new', MessageNewHandler), ('/api/message/updates', MessageUpdatesHandler), ('/api/users', UsersHandler), ( '/api/chats', ChatsHandler), ('/api/chats/user', ChatsUserHandler), ('/api/contacts', ContactsNewHandler)] tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseHandler(tornado.web.RequestHandler): <|reserved_special_token_0|> messages_new_api = None messages_updates_api = None users_api = None chats_api = None chats_user_api = None contacts_new_api = None def data_received(self, chunk): pass def set_default_headers(self): self.set_header('Access-Control-Allow-Origin', '*') self.set_header('Access-Control-Allow-Headers', 'Content-Type') def options(self): """Answer OPTIONS request.""" self.finish() def get_post_data(self): """extract input JSON from POST request""" json_data = '' if self.request.files: json_data = self.request.files['file'][0]['body'] elif self.request.body: json_data = self.request.body try: data = json.loads(json_data) except ValueError: data = None return data async def handle_request(self, api_endpoint, api_version): """Takes care of validation of input and execution of POST and GET methods.""" code = 400 data = self.get_post_data() request_method = self.request.method.lower() if data: try: res = await getattr(api_endpoint, 'process_' + request_method)( api_version, data) code = 200 except ValidationError as validerr: if validerr.absolute_path: res = '%s : %s' % (validerr.absolute_path.pop(), validerr.message) else: res = '%s' % validerr.message LOGGER.error('ValidationError: %s', res) raise tornado.web.HTTPError(reason=res) except ValueError as valuerr: res = str(valuerr) LOGGER.error('ValueError: %s', res) raise tornado.web.HTTPError(reason=res) except DatabaseError as dberr: err_id = dberr.__hash__() res = str(dberr.reason) LOGGER.error(res) LOGGER.info('Input data for <%s>: %s', err_id, data) raise dberr except Exception as err: err_id = err.__hash__() res = ( 'Internal server error <%s>:please include this error id in bug report.' 
% err_id) code = 500 LOGGER.exception(res) LOGGER.info('Input data for <%s>: %s', err_id, data) raise tornado.web.HTTPError(reason=res) else: res = 'Error: malformed input JSON.' LOGGER.error(res) raise tornado.web.HTTPError(reason=res) self.set_status(code) self.write(res) def write_error(self, status_code, **kwargs): self.set_header('Content-Type', 'application/json') if self.settings.get('serve_traceback') and 'exc_info' in kwargs: lines = [] for line in traceback.format_exception(*kwargs['exc_info']): lines.append(line) self.finish(json.dumps({'error': {'code': status_code, 'message': self._reason, 'traceback': lines}})) else: self.finish(json.dumps({'error': {'code': status_code, 'message': self._reason}})) class MainHandler(BaseHandler): """Handler for the API root.""" def get(self): """Returns the root endpoint of the API.""" self.write( '{"error": "cryptochat-server main page, please refer to /api/message/new or /api/message/updates"}' ) class MessageNewHandler(BaseHandler): """Post a new message to the chat room.""" async def post(self): """ Add a new message to the server. """ await self.handle_request(self.messages_new_api, 1) class MessageUpdatesHandler(BaseHandler): """Long-polling request for new messages. Waits until new messages are available before returning anything. """ async def post(self): """Checks for the new message updates, waits until new messages are available.""" await self.handle_request(self.messages_updates_api, 1) class UsersHandler(BaseHandler): """Handler class providing /users POST requests.""" async def post(self): """Adds a new user to the database.""" await self.handle_request(self.users_api, 1) async def get(self): """Returns details of particular user.""" await self.handle_request(self.users_api, 1) class ChatsHandler(BaseHandler): """Handler providing /chats POST requests""" async def post(self): """Adds a new chat to the database.""" await self.handle_request(self.chats_api, 1) async def get(self): """Returns details of particular chat.""" await self.handle_request(self.chats_api, 1) class ChatsUserHandler(BaseHandler): """Handler providing /chats/user GET requests""" async def get(self): """Returns chats for the given user.""" await self.handle_request(self.chats_user_api, 1) class ContactsNewHandler(BaseHandler): """Handler providing /contacts POST requests""" async def post(self): """Adds a new contact to the database""" await self.handle_request(self.contacts_new_api, 1) async def get(self): """Returns details of particular contact.""" await self.handle_request(self.contacts_new_api, 1) class Application(tornado.web.Application): """ main cryptochat application class """ def __init__(self): handlers = [('/', MainHandler), ('/api/message/new', MessageNewHandler), ('/api/message/updates', MessageUpdatesHandler), ('/api/users', UsersHandler), ( '/api/chats', ChatsHandler), ('/api/chats/user', ChatsUserHandler), ('/api/contacts', ContactsNewHandler)] tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False) <|reserved_special_token_0|> <|reserved_special_token_1|> #!/usr/bin/env python3 """ Main chat API module """ import json import os import signal import traceback import tornado.escape import tornado.gen import tornado.httpserver import tornado.ioloop import tornado.locks import tornado.web from jsonschema.exceptions import ValidationError from db import DB, DatabaseError from logging_utils import get_logger, init_logging from messages import MessagesNewAPI from messages import MessagesUpdatesAPI from users import 
UsersAPI from chats import ChatsAPI, ChatsUserAPI from contacts import ContactsAPI LOGGER = get_logger(__name__) SERVER_VERSION = os.getenv('VERSION', 'unknown') PUBLIC_API_PORT = 8888 DATABASE_LOCATION = os.getenv('DATABASE_LOCATION', '/tmp/cryptochat_db.json') _SHUTDOWN_TIMEOUT = 3 class BaseHandler(tornado.web.RequestHandler): """Base handler setting CORS headers.""" messages_new_api = None messages_updates_api = None users_api = None chats_api = None chats_user_api = None contacts_new_api = None def data_received(self, chunk): pass def set_default_headers(self): self.set_header("Access-Control-Allow-Origin", "*") self.set_header("Access-Control-Allow-Headers", "Content-Type") def options(self): """Answer OPTIONS request.""" self.finish() def get_post_data(self): """extract input JSON from POST request""" json_data = '' # check if JSON is passed as a file or as a body of POST request if self.request.files: json_data = self.request.files['file'][0][ 'body'] # pick up only first file (index 0) elif self.request.body: json_data = self.request.body try: data = json.loads(json_data) except ValueError: data = None return data async def handle_request(self, api_endpoint, api_version): """Takes care of validation of input and execution of POST and GET methods.""" code = 400 data = self.get_post_data() request_method = self.request.method.lower() if data: try: # will call process_get or process_post methods for the given API res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data) code = 200 except ValidationError as validerr: if validerr.absolute_path: res = '%s : %s' % ( validerr.absolute_path.pop(), validerr.message) else: res = '%s' % validerr.message LOGGER.error('ValidationError: %s', res) raise tornado.web.HTTPError(reason=res) except ValueError as valuerr: res = str(valuerr) LOGGER.error('ValueError: %s', res) raise tornado.web.HTTPError(reason=res) except DatabaseError as dberr: err_id = dberr.__hash__() res = str(dberr.reason) LOGGER.error(res) LOGGER.info("Input data for <%s>: %s", err_id, data) raise dberr except Exception as err: # pylint: disable=broad-except err_id = err.__hash__() res = 'Internal server error <%s>:' \ 'please include this error id in bug report.' % err_id code = 500 LOGGER.exception(res) LOGGER.info("Input data for <%s>: %s", err_id, data) raise tornado.web.HTTPError(reason=res) else: res = 'Error: malformed input JSON.' LOGGER.error(res) raise tornado.web.HTTPError(reason=res) # raise tornado.web.HTTPError(status_code=444, reason='error happened') self.set_status(code) self.write(res) def write_error(self, status_code, **kwargs): self.set_header('Content-Type', 'application/json') if self.settings.get("serve_traceback") and "exc_info" in kwargs: # in debug mode, try to send a traceback lines = [] for line in traceback.format_exception(*kwargs["exc_info"]): lines.append(line) self.finish(json.dumps({ 'error': { 'code': status_code, 'message': self._reason, 'traceback': lines, } })) else: self.finish(json.dumps({ 'error': { 'code': status_code, 'message': self._reason, } })) class MainHandler(BaseHandler): """Handler for the API root.""" def get(self): """Returns the root endpoint of the API.""" self.write( '{"error": "cryptochat-server main page, ' 'please refer to /api/message/new or /api/message/updates"}') class MessageNewHandler(BaseHandler): """Post a new message to the chat room.""" async def post(self): """ Add a new message to the server. 
""" await self.handle_request(self.messages_new_api, 1) class MessageUpdatesHandler(BaseHandler): """Long-polling request for new messages. Waits until new messages are available before returning anything. """ async def post(self): """Checks for the new message updates, waits until new messages are available.""" await self.handle_request(self.messages_updates_api, 1) # def on_connection_close(self): # self.wait_future.cancel() class UsersHandler(BaseHandler): """Handler class providing /users POST requests.""" async def post(self): """Adds a new user to the database.""" await self.handle_request(self.users_api, 1) async def get(self): """Returns details of particular user.""" await self.handle_request(self.users_api, 1) class ChatsHandler(BaseHandler): """Handler providing /chats POST requests""" async def post(self): """Adds a new chat to the database.""" await self.handle_request(self.chats_api, 1) async def get(self): """Returns details of particular chat.""" await self.handle_request(self.chats_api, 1) class ChatsUserHandler(BaseHandler): """Handler providing /chats/user GET requests""" async def get(self): """Returns chats for the given user.""" await self.handle_request(self.chats_user_api, 1) class ContactsNewHandler(BaseHandler): """Handler providing /contacts POST requests""" async def post(self): """Adds a new contact to the database""" await self.handle_request(self.contacts_new_api, 1) async def get(self): """Returns details of particular contact.""" await self.handle_request(self.contacts_new_api, 1) class Application(tornado.web.Application): """ main cryptochat application class """ def __init__(self): handlers = [ (r"/", MainHandler), (r"/api/message/new", MessageNewHandler), (r"/api/message/updates", MessageUpdatesHandler), (r"/api/users", UsersHandler), (r"/api/chats", ChatsHandler), (r"/api/chats/user", ChatsUserHandler), (r"/api/contacts", ContactsNewHandler), ] tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False) def main(): """ The main function. It creates cryptochat application, run everything.""" async def shutdown(): server.stop() await tornado.gen.sleep(_SHUTDOWN_TIMEOUT) tornado.ioloop.IOLoop.current().stop() LOGGER.info("Server was successfully shut down.") def exit_handler(sig, frame): # pylint: disable=unused-argument def get_sig_name(sig): return dict((k, v) for v, k in reversed(sorted(signal.__dict__.items())) if v.startswith('SIG') and not v.startswith('SIG_')).pop(sig) LOGGER.warning("Registered %s, shutting down.", get_sig_name(sig)) tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown) signal.signal(signal.SIGTERM, exit_handler) signal.signal(signal.SIGINT, exit_handler) init_logging() cryptochat_db = DB(DATABASE_LOCATION) cryptochat_app = Application() server = tornado.httpserver.HTTPServer(cryptochat_app) server.bind(PUBLIC_API_PORT) server.start() LOGGER.info("Starting cryptochat (version %s).", SERVER_VERSION) BaseHandler.messages_new_api = MessagesNewAPI(cryptochat_db) BaseHandler.messages_updates_api = MessagesUpdatesAPI(cryptochat_db) BaseHandler.users_api = UsersAPI(cryptochat_db) BaseHandler.chats_api = ChatsAPI(cryptochat_db) BaseHandler.chats_user_api = ChatsUserAPI(cryptochat_db) BaseHandler.contacts_new_api = ContactsAPI(cryptochat_db) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main()
flexible
{ "blob_id": "9f8d79d141d414c1256e39f58e59f97711acfee4", "index": 4915, "step-1": "<mask token>\n\n\nclass MainHandler(BaseHandler):\n <mask token>\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n res = await getattr(api_endpoint, 'process_' + request_method)(\n api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (validerr.absolute_path.pop(),\n validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except 
ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise dberr\n except Exception as err:\n err_id = err.__hash__()\n res = (\n 'Internal server error <%s>:please include this error id in bug report.'\n % err_id)\n code = 500\n LOGGER.exception(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n self.set_status(code)\n self.write(res)\n <mask token>\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def 
data_received(self, chunk):\n pass\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'Content-Type')\n <mask token>\n\n def get_post_data(self):\n \"\"\"extract input JSON from POST request\"\"\"\n json_data = ''\n if self.request.files:\n json_data = self.request.files['file'][0]['body']\n elif self.request.body:\n json_data = self.request.body\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n res = await getattr(api_endpoint, 'process_' + request_method)(\n api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (validerr.absolute_path.pop(),\n validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise dberr\n except Exception as err:\n err_id = err.__hash__()\n res = (\n 'Internal server error <%s>:please include this error id in bug report.'\n % err_id)\n code = 500\n LOGGER.exception(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n self.set_status(code)\n self.write(res)\n <mask token>\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n 
\"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n messages_new_api = None\n messages_updates_api = None\n users_api = None\n chats_api = None\n chats_user_api = None\n contacts_new_api = None\n\n def data_received(self, chunk):\n pass\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'Content-Type')\n\n def options(self):\n \"\"\"Answer OPTIONS request.\"\"\"\n self.finish()\n\n def get_post_data(self):\n \"\"\"extract input JSON from POST request\"\"\"\n json_data = ''\n if self.request.files:\n json_data = self.request.files['file'][0]['body']\n elif self.request.body:\n json_data = self.request.body\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n res = await getattr(api_endpoint, 'process_' + request_method)(\n api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (validerr.absolute_path.pop(),\n validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise dberr\n except Exception as err:\n err_id = err.__hash__()\n res = (\n 'Internal server error <%s>:please include this error id in bug report.'\n % err_id)\n code = 500\n LOGGER.exception(res)\n LOGGER.info('Input data for <%s>: %s', err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n self.set_status(code)\n self.write(res)\n\n def write_error(self, status_code, **kwargs):\n self.set_header('Content-Type', 'application/json')\n if self.settings.get('serve_traceback') and 'exc_info' in kwargs:\n lines = []\n for line in traceback.format_exception(*kwargs['exc_info']):\n lines.append(line)\n self.finish(json.dumps({'error': {'code': status_code,\n 'message': self._reason, 'traceback': lines}}))\n else:\n 
self.finish(json.dumps({'error': {'code': status_code,\n 'message': self._reason}}))\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, please refer to /api/message/new or /api/message/updates\"}'\n )\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [('/', MainHandler), ('/api/message/new',\n MessageNewHandler), ('/api/message/updates',\n MessageUpdatesHandler), ('/api/users', UsersHandler), (\n '/api/chats', ChatsHandler), ('/api/chats/user',\n ChatsUserHandler), ('/api/contacts', ContactsNewHandler)]\n tornado.web.Application.__init__(self, handlers, debug=True,\n serve_traceback=False)\n\n\n<mask token>\n", "step-5": "#!/usr/bin/env python3\n\"\"\"\nMain chat API module\n\"\"\"\n\nimport json\nimport os\nimport signal\nimport traceback\n\nimport tornado.escape\nimport tornado.gen\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.locks\nimport tornado.web\nfrom jsonschema.exceptions import ValidationError\n\nfrom db import DB, DatabaseError\nfrom logging_utils import get_logger, init_logging\nfrom messages import MessagesNewAPI\nfrom messages import MessagesUpdatesAPI\nfrom users import UsersAPI\nfrom chats import ChatsAPI, ChatsUserAPI\nfrom contacts import ContactsAPI\n\nLOGGER = get_logger(__name__)\nSERVER_VERSION = os.getenv('VERSION', 'unknown')\nPUBLIC_API_PORT = 8888\nDATABASE_LOCATION = os.getenv('DATABASE_LOCATION', '/tmp/cryptochat_db.json')\n_SHUTDOWN_TIMEOUT = 3\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n 
\"\"\"Base handler setting CORS headers.\"\"\"\n\n messages_new_api = None\n messages_updates_api = None\n users_api = None\n chats_api = None\n chats_user_api = None\n contacts_new_api = None\n\n def data_received(self, chunk):\n pass\n\n def set_default_headers(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\n def options(self):\n \"\"\"Answer OPTIONS request.\"\"\"\n self.finish()\n\n def get_post_data(self):\n \"\"\"extract input JSON from POST request\"\"\"\n json_data = ''\n\n # check if JSON is passed as a file or as a body of POST request\n if self.request.files:\n json_data = self.request.files['file'][0][\n 'body'] # pick up only first file (index 0)\n elif self.request.body:\n json_data = self.request.body\n\n try:\n data = json.loads(json_data)\n except ValueError:\n data = None\n return data\n\n async def handle_request(self, api_endpoint, api_version):\n \"\"\"Takes care of validation of input and execution of POST and GET methods.\"\"\"\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n # will call process_get or process_post methods for the given API\n res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (\n validerr.absolute_path.pop(), validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise dberr\n except Exception as err: # pylint: disable=broad-except\n err_id = err.__hash__()\n res = 'Internal server error <%s>:' \\\n 'please include this error id in bug report.' 
% err_id\n code = 500\n LOGGER.exception(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n\n # raise tornado.web.HTTPError(status_code=444, reason='error happened')\n self.set_status(code)\n self.write(res)\n\n def write_error(self, status_code, **kwargs):\n\n self.set_header('Content-Type', 'application/json')\n if self.settings.get(\"serve_traceback\") and \"exc_info\" in kwargs:\n # in debug mode, try to send a traceback\n lines = []\n for line in traceback.format_exception(*kwargs[\"exc_info\"]):\n lines.append(line)\n self.finish(json.dumps({\n 'error': {\n 'code': status_code,\n 'message': self._reason,\n 'traceback': lines,\n }\n }))\n else:\n self.finish(json.dumps({\n 'error': {\n 'code': status_code,\n 'message': self._reason,\n }\n }))\n\n\nclass MainHandler(BaseHandler):\n \"\"\"Handler for the API root.\"\"\"\n\n def get(self):\n \"\"\"Returns the root endpoint of the API.\"\"\"\n self.write(\n '{\"error\": \"cryptochat-server main page, '\n 'please refer to /api/message/new or /api/message/updates\"}')\n\n\nclass MessageNewHandler(BaseHandler):\n \"\"\"Post a new message to the chat room.\"\"\"\n\n async def post(self):\n \"\"\"\n Add a new message to the server.\n \"\"\"\n await self.handle_request(self.messages_new_api, 1)\n\n\nclass MessageUpdatesHandler(BaseHandler):\n \"\"\"Long-polling request for new messages.\n\n Waits until new messages are available before returning anything.\n \"\"\"\n\n async def post(self):\n \"\"\"Checks for the new message updates, waits until\n new messages are available.\"\"\"\n await self.handle_request(self.messages_updates_api, 1)\n\n # def on_connection_close(self):\n # self.wait_future.cancel()\n\n\nclass UsersHandler(BaseHandler):\n \"\"\"Handler class providing /users POST requests.\"\"\"\n\n async def post(self):\n \"\"\"Adds a new user to the database.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular user.\"\"\"\n await self.handle_request(self.users_api, 1)\n\n\nclass ChatsHandler(BaseHandler):\n \"\"\"Handler providing /chats POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new chat to the database.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular chat.\"\"\"\n await self.handle_request(self.chats_api, 1)\n\n\nclass ChatsUserHandler(BaseHandler):\n \"\"\"Handler providing /chats/user GET requests\"\"\"\n\n async def get(self):\n \"\"\"Returns chats for the given user.\"\"\"\n await self.handle_request(self.chats_user_api, 1)\n\n\nclass ContactsNewHandler(BaseHandler):\n \"\"\"Handler providing /contacts POST requests\"\"\"\n\n async def post(self):\n \"\"\"Adds a new contact to the database\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n async def get(self):\n \"\"\"Returns details of particular contact.\"\"\"\n await self.handle_request(self.contacts_new_api, 1)\n\n\nclass Application(tornado.web.Application):\n \"\"\" main cryptochat application class \"\"\"\n\n def __init__(self):\n handlers = [\n (r\"/\", MainHandler),\n (r\"/api/message/new\", MessageNewHandler),\n (r\"/api/message/updates\", MessageUpdatesHandler),\n (r\"/api/users\", UsersHandler),\n (r\"/api/chats\", ChatsHandler),\n (r\"/api/chats/user\", ChatsUserHandler),\n (r\"/api/contacts\", ContactsNewHandler),\n ]\n\n 
tornado.web.Application.__init__(self, handlers, debug=True, serve_traceback=False)\n\n\ndef main():\n \"\"\" The main function. It creates cryptochat application, run everything.\"\"\"\n\n async def shutdown():\n server.stop()\n await tornado.gen.sleep(_SHUTDOWN_TIMEOUT)\n tornado.ioloop.IOLoop.current().stop()\n LOGGER.info(\"Server was successfully shut down.\")\n\n def exit_handler(sig, frame): # pylint: disable=unused-argument\n def get_sig_name(sig):\n return dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))\n if v.startswith('SIG') and not v.startswith('SIG_')).pop(sig)\n\n LOGGER.warning(\"Registered %s, shutting down.\", get_sig_name(sig))\n tornado.ioloop.IOLoop.instance().add_callback_from_signal(shutdown)\n\n signal.signal(signal.SIGTERM, exit_handler)\n signal.signal(signal.SIGINT, exit_handler)\n\n init_logging()\n cryptochat_db = DB(DATABASE_LOCATION)\n\n cryptochat_app = Application()\n server = tornado.httpserver.HTTPServer(cryptochat_app)\n server.bind(PUBLIC_API_PORT)\n server.start()\n LOGGER.info(\"Starting cryptochat (version %s).\", SERVER_VERSION)\n\n BaseHandler.messages_new_api = MessagesNewAPI(cryptochat_db)\n BaseHandler.messages_updates_api = MessagesUpdatesAPI(cryptochat_db)\n BaseHandler.users_api = UsersAPI(cryptochat_db)\n BaseHandler.chats_api = ChatsAPI(cryptochat_db)\n BaseHandler.chats_user_api = ChatsUserAPI(cryptochat_db)\n BaseHandler.contacts_new_api = ContactsAPI(cryptochat_db)\n\n tornado.ioloop.IOLoop.current().start()\n\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 17, 19, 22, 25, 31 ] }
# -*- coding: utf-8 -*-
"""Code handling the concurrency of data analysis."""
normal
{ "blob_id": "2e23225ec4cd693f5e9460a13d64206f184a86a0", "index": 3043, "step-1": "<mask token>\n", "step-2": "# -*- coding: utf-8 -*-\n\"\"\"Code handling the concurrency of data analysis.\"\"\"\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
<|reserved_special_token_0|> class V_test_abstract(V): def __init__(self): super(V_test_abstract, self).__init__() <|reserved_special_token_0|> def forward(self): z = self.beta[:self.dim] r1_local = self.beta[self.dim:2 * self.dim] r2_local = self.beta[2 * self.dim:3 * self.dim] r1_local_plus = self.beta[3 * self.dim:4 * self.dim] r2_local_plus = self.beta[4 * self.dim:5 * self.dim] r1_global = self.beta[5 * self.dim] r2_global = self.beta[5 * self.dim + 1] sigma = self.beta[5 * self.dim + 2] w0 = self.beta[5 * self.dim + 3] tau = r1_global * torch.sqrt(r2_global) lamb = r1_local * torch.sqrt(r2_local) lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus) w = z * lamb * lambda_plus * tau outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w)) ) / (sigma * sigma) * 0.5 outz = torch.dot(z, z) * 0.5 outr1_local = torch.dot(r1_local, r1_local) outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 * self.nu / r2_local).sum() outr1_global = r1_global * r1_global * 0.5 outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global outw0 = w0 * w0 / 25.0 out = (outy + outz + outr1_local + outr2_local + outr1_global + outr2_global + outw0) return out <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class V_test_abstract(V): def __init__(self): super(V_test_abstract, self).__init__() def V_setup(self, y, X, nu): self.explicit_gradient = False self.need_higherorderderiv = True self.dim = X.shape[1] self.beta = nn.Parameter(torch.zeros(self.dim * 5 + 4), requires_grad=True) self.y = y self.X = X self.nu = nu return () def forward(self): z = self.beta[:self.dim] r1_local = self.beta[self.dim:2 * self.dim] r2_local = self.beta[2 * self.dim:3 * self.dim] r1_local_plus = self.beta[3 * self.dim:4 * self.dim] r2_local_plus = self.beta[4 * self.dim:5 * self.dim] r1_global = self.beta[5 * self.dim] r2_global = self.beta[5 * self.dim + 1] sigma = self.beta[5 * self.dim + 2] w0 = self.beta[5 * self.dim + 3] tau = r1_global * torch.sqrt(r2_global) lamb = r1_local * torch.sqrt(r2_local) lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus) w = z * lamb * lambda_plus * tau outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w)) ) / (sigma * sigma) * 0.5 outz = torch.dot(z, z) * 0.5 outr1_local = torch.dot(r1_local, r1_local) outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 * self.nu / r2_local).sum() outr1_global = r1_global * r1_global * 0.5 outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global outw0 = w0 * w0 / 25.0 out = (outy + outz + outr1_local + outr2_local + outr1_global + outr2_global + outw0) return out <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class V_test_abstract(V): def __init__(self): super(V_test_abstract, self).__init__() def V_setup(self, y, X, nu): self.explicit_gradient = False self.need_higherorderderiv = True self.dim = X.shape[1] self.beta = nn.Parameter(torch.zeros(self.dim * 5 + 4), requires_grad=True) self.y = y self.X = X self.nu = nu return () def forward(self): z = self.beta[:self.dim] r1_local = self.beta[self.dim:2 * self.dim] r2_local = self.beta[2 * self.dim:3 * self.dim] r1_local_plus = self.beta[3 * self.dim:4 * self.dim] r2_local_plus = self.beta[4 * self.dim:5 * self.dim] r1_global = self.beta[5 * self.dim] r2_global = self.beta[5 * self.dim + 1] sigma = self.beta[5 * self.dim + 2] w0 = self.beta[5 * self.dim + 3] tau = r1_global * torch.sqrt(r2_global) lamb = r1_local * torch.sqrt(r2_local) lambda_plus = r1_local_plus * 
torch.sqrt(r2_local_plus) w = z * lamb * lambda_plus * tau outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w)) ) / (sigma * sigma) * 0.5 outz = torch.dot(z, z) * 0.5 outr1_local = torch.dot(r1_local, r1_local) outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 * self.nu / r2_local).sum() outr1_global = r1_global * r1_global * 0.5 outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global outw0 = w0 * w0 / 25.0 out = (outy + outz + outr1_local + outr2_local + outr1_global + outr2_global + outw0) return out def load_explcit_gradient(self): return () <|reserved_special_token_1|> from abstract_class_V import V import torch import torch.nn as nn class V_test_abstract(V): def __init__(self): super(V_test_abstract, self).__init__() def V_setup(self, y, X, nu): self.explicit_gradient = False self.need_higherorderderiv = True self.dim = X.shape[1] self.beta = nn.Parameter(torch.zeros(self.dim * 5 + 4), requires_grad=True) self.y = y self.X = X self.nu = nu return () def forward(self): z = self.beta[:self.dim] r1_local = self.beta[self.dim:2 * self.dim] r2_local = self.beta[2 * self.dim:3 * self.dim] r1_local_plus = self.beta[3 * self.dim:4 * self.dim] r2_local_plus = self.beta[4 * self.dim:5 * self.dim] r1_global = self.beta[5 * self.dim] r2_global = self.beta[5 * self.dim + 1] sigma = self.beta[5 * self.dim + 2] w0 = self.beta[5 * self.dim + 3] tau = r1_global * torch.sqrt(r2_global) lamb = r1_local * torch.sqrt(r2_local) lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus) w = z * lamb * lambda_plus * tau outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w)) ) / (sigma * sigma) * 0.5 outz = torch.dot(z, z) * 0.5 outr1_local = torch.dot(r1_local, r1_local) outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 * self.nu / r2_local).sum() outr1_global = r1_global * r1_global * 0.5 outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global outw0 = w0 * w0 / 25.0 out = (outy + outz + outr1_local + outr2_local + outr1_global + outr2_global + outw0) return out def load_explcit_gradient(self): return () <|reserved_special_token_1|> from abstract_class_V import V import torch import torch.nn as nn class V_test_abstract(V): def __init__(self): super(V_test_abstract, self).__init__() def V_setup(self,y,X,nu): self.explicit_gradient = False self.need_higherorderderiv = True self.dim = X.shape[1] self.beta = nn.Parameter(torch.zeros(self.dim*5+4),requires_grad=True) self.y = y self.X = X self.nu = nu # beta[:dim] = z # beta[(dim):(2dim)] = r1_local # beta[(2dim):(3dim)] = r2_local # beta[(3dim):(4dim)] = r1_local_plus # beta[(4dim):(5dim)] = r2_local_plus # beta[5dim] = r1_global # beta[5dim+1] = r2_global # beta[5dim+2] = sigma # beta[5dim+3] = w0 return() def forward(self): z = self.beta[:self.dim] r1_local = self.beta[(self.dim):(2*self.dim)] r2_local = self.beta[(2*self.dim):(3*self.dim)] r1_local_plus = self.beta[(3*self.dim):(4*self.dim)] r2_local_plus = self.beta[(4*self.dim):(5*self.dim)] r1_global = self.beta[5*self.dim] r2_global = self.beta[5*self.dim+1] sigma = self.beta[5*self.dim+2] w0 = self.beta[5*self.dim+3] tau = r1_global * torch.sqrt(r2_global) lamb = r1_local * torch.sqrt(r2_local) lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus) w = z * lamb * lambda_plus * tau outy = (self.y - (w0 + self.X.mv(w)))*(self.y - (w0 + self.X.mv(w)))/(sigma*sigma) * 0.5 outz = torch.dot(z,z) * 0.5 outr1_local = torch.dot(r1_local,r1_local) outr2_local = ((0.5*self.nu+1)*torch.log(r2_local) + 0.5 * self.nu/r2_local).sum() outr1_global = 
r1_global*r1_global * 0.5 outr2_global = 1.5 * torch.log(r2_global) + 0.5/r2_global outw0 = w0*w0/(25.) out = outy+outz+outr1_local+outr2_local+outr1_global+outr2_global+outw0 return(out) def load_explcit_gradient(self): return()
flexible
{ "blob_id": "27e9e63338d422b5fca6f7a67fa3d255602a3358", "index": 225, "step-1": "<mask token>\n\n\nclass V_test_abstract(V):\n\n def __init__(self):\n super(V_test_abstract, self).__init__()\n <mask token>\n\n def forward(self):\n z = self.beta[:self.dim]\n r1_local = self.beta[self.dim:2 * self.dim]\n r2_local = self.beta[2 * self.dim:3 * self.dim]\n r1_local_plus = self.beta[3 * self.dim:4 * self.dim]\n r2_local_plus = self.beta[4 * self.dim:5 * self.dim]\n r1_global = self.beta[5 * self.dim]\n r2_global = self.beta[5 * self.dim + 1]\n sigma = self.beta[5 * self.dim + 2]\n w0 = self.beta[5 * self.dim + 3]\n tau = r1_global * torch.sqrt(r2_global)\n lamb = r1_local * torch.sqrt(r2_local)\n lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus)\n w = z * lamb * lambda_plus * tau\n outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w))\n ) / (sigma * sigma) * 0.5\n outz = torch.dot(z, z) * 0.5\n outr1_local = torch.dot(r1_local, r1_local)\n outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 *\n self.nu / r2_local).sum()\n outr1_global = r1_global * r1_global * 0.5\n outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global\n outw0 = w0 * w0 / 25.0\n out = (outy + outz + outr1_local + outr2_local + outr1_global +\n outr2_global + outw0)\n return out\n <mask token>\n", "step-2": "<mask token>\n\n\nclass V_test_abstract(V):\n\n def __init__(self):\n super(V_test_abstract, self).__init__()\n\n def V_setup(self, y, X, nu):\n self.explicit_gradient = False\n self.need_higherorderderiv = True\n self.dim = X.shape[1]\n self.beta = nn.Parameter(torch.zeros(self.dim * 5 + 4),\n requires_grad=True)\n self.y = y\n self.X = X\n self.nu = nu\n return ()\n\n def forward(self):\n z = self.beta[:self.dim]\n r1_local = self.beta[self.dim:2 * self.dim]\n r2_local = self.beta[2 * self.dim:3 * self.dim]\n r1_local_plus = self.beta[3 * self.dim:4 * self.dim]\n r2_local_plus = self.beta[4 * self.dim:5 * self.dim]\n r1_global = self.beta[5 * self.dim]\n r2_global = self.beta[5 * self.dim + 1]\n sigma = self.beta[5 * self.dim + 2]\n w0 = self.beta[5 * self.dim + 3]\n tau = r1_global * torch.sqrt(r2_global)\n lamb = r1_local * torch.sqrt(r2_local)\n lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus)\n w = z * lamb * lambda_plus * tau\n outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w))\n ) / (sigma * sigma) * 0.5\n outz = torch.dot(z, z) * 0.5\n outr1_local = torch.dot(r1_local, r1_local)\n outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 *\n self.nu / r2_local).sum()\n outr1_global = r1_global * r1_global * 0.5\n outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global\n outw0 = w0 * w0 / 25.0\n out = (outy + outz + outr1_local + outr2_local + outr1_global +\n outr2_global + outw0)\n return out\n <mask token>\n", "step-3": "<mask token>\n\n\nclass V_test_abstract(V):\n\n def __init__(self):\n super(V_test_abstract, self).__init__()\n\n def V_setup(self, y, X, nu):\n self.explicit_gradient = False\n self.need_higherorderderiv = True\n self.dim = X.shape[1]\n self.beta = nn.Parameter(torch.zeros(self.dim * 5 + 4),\n requires_grad=True)\n self.y = y\n self.X = X\n self.nu = nu\n return ()\n\n def forward(self):\n z = self.beta[:self.dim]\n r1_local = self.beta[self.dim:2 * self.dim]\n r2_local = self.beta[2 * self.dim:3 * self.dim]\n r1_local_plus = self.beta[3 * self.dim:4 * self.dim]\n r2_local_plus = self.beta[4 * self.dim:5 * self.dim]\n r1_global = self.beta[5 * self.dim]\n r2_global = self.beta[5 * self.dim + 1]\n sigma = self.beta[5 * 
self.dim + 2]\n w0 = self.beta[5 * self.dim + 3]\n tau = r1_global * torch.sqrt(r2_global)\n lamb = r1_local * torch.sqrt(r2_local)\n lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus)\n w = z * lamb * lambda_plus * tau\n outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w))\n ) / (sigma * sigma) * 0.5\n outz = torch.dot(z, z) * 0.5\n outr1_local = torch.dot(r1_local, r1_local)\n outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 *\n self.nu / r2_local).sum()\n outr1_global = r1_global * r1_global * 0.5\n outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global\n outw0 = w0 * w0 / 25.0\n out = (outy + outz + outr1_local + outr2_local + outr1_global +\n outr2_global + outw0)\n return out\n\n def load_explcit_gradient(self):\n return ()\n", "step-4": "from abstract_class_V import V\nimport torch\nimport torch.nn as nn\n\n\nclass V_test_abstract(V):\n\n def __init__(self):\n super(V_test_abstract, self).__init__()\n\n def V_setup(self, y, X, nu):\n self.explicit_gradient = False\n self.need_higherorderderiv = True\n self.dim = X.shape[1]\n self.beta = nn.Parameter(torch.zeros(self.dim * 5 + 4),\n requires_grad=True)\n self.y = y\n self.X = X\n self.nu = nu\n return ()\n\n def forward(self):\n z = self.beta[:self.dim]\n r1_local = self.beta[self.dim:2 * self.dim]\n r2_local = self.beta[2 * self.dim:3 * self.dim]\n r1_local_plus = self.beta[3 * self.dim:4 * self.dim]\n r2_local_plus = self.beta[4 * self.dim:5 * self.dim]\n r1_global = self.beta[5 * self.dim]\n r2_global = self.beta[5 * self.dim + 1]\n sigma = self.beta[5 * self.dim + 2]\n w0 = self.beta[5 * self.dim + 3]\n tau = r1_global * torch.sqrt(r2_global)\n lamb = r1_local * torch.sqrt(r2_local)\n lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus)\n w = z * lamb * lambda_plus * tau\n outy = (self.y - (w0 + self.X.mv(w))) * (self.y - (w0 + self.X.mv(w))\n ) / (sigma * sigma) * 0.5\n outz = torch.dot(z, z) * 0.5\n outr1_local = torch.dot(r1_local, r1_local)\n outr2_local = ((0.5 * self.nu + 1) * torch.log(r2_local) + 0.5 *\n self.nu / r2_local).sum()\n outr1_global = r1_global * r1_global * 0.5\n outr2_global = 1.5 * torch.log(r2_global) + 0.5 / r2_global\n outw0 = w0 * w0 / 25.0\n out = (outy + outz + outr1_local + outr2_local + outr1_global +\n outr2_global + outw0)\n return out\n\n def load_explcit_gradient(self):\n return ()\n", "step-5": "from abstract_class_V import V\nimport torch\nimport torch.nn as nn\n\n\nclass V_test_abstract(V):\n def __init__(self):\n super(V_test_abstract, self).__init__()\n\n def V_setup(self,y,X,nu):\n self.explicit_gradient = False\n self.need_higherorderderiv = True\n self.dim = X.shape[1]\n self.beta = nn.Parameter(torch.zeros(self.dim*5+4),requires_grad=True)\n self.y = y\n self.X = X\n self.nu = nu\n\n # beta[:dim] = z\n # beta[(dim):(2dim)] = r1_local\n # beta[(2dim):(3dim)] = r2_local\n # beta[(3dim):(4dim)] = r1_local_plus\n # beta[(4dim):(5dim)] = r2_local_plus\n # beta[5dim] = r1_global\n # beta[5dim+1] = r2_global\n # beta[5dim+2] = sigma\n # beta[5dim+3] = w0\n return()\n\n def forward(self):\n z = self.beta[:self.dim]\n r1_local = self.beta[(self.dim):(2*self.dim)]\n r2_local = self.beta[(2*self.dim):(3*self.dim)]\n r1_local_plus = self.beta[(3*self.dim):(4*self.dim)]\n r2_local_plus = self.beta[(4*self.dim):(5*self.dim)]\n r1_global = self.beta[5*self.dim]\n r2_global = self.beta[5*self.dim+1]\n sigma = self.beta[5*self.dim+2]\n w0 = self.beta[5*self.dim+3]\n\n tau = r1_global * torch.sqrt(r2_global)\n lamb = r1_local * torch.sqrt(r2_local)\n 
lambda_plus = r1_local_plus * torch.sqrt(r2_local_plus)\n w = z * lamb * lambda_plus * tau\n\n outy = (self.y - (w0 + self.X.mv(w)))*(self.y - (w0 + self.X.mv(w)))/(sigma*sigma) * 0.5\n outz = torch.dot(z,z) * 0.5\n outr1_local = torch.dot(r1_local,r1_local)\n outr2_local = ((0.5*self.nu+1)*torch.log(r2_local) + 0.5 * self.nu/r2_local).sum()\n outr1_global = r1_global*r1_global * 0.5\n outr2_global = 1.5 * torch.log(r2_global) + 0.5/r2_global\n outw0 = w0*w0/(25.)\n out = outy+outz+outr1_local+outr2_local+outr1_global+outr2_global+outw0\n return(out)\n\n def load_explcit_gradient(self):\n return()", "step-ids": [ 3, 4, 5, 6, 7 ] }
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..backend.ir_to_caffe import CaffeConverter
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.mge_to_ir import MGE_FrontEnd


def mge_to_caffe(
    mge_fpath,
    prototxt="out.prototxt",
    caffemodel="out.caffemodel",
    outspec=None,
    use_empty_blobs=False,
):
    assert isinstance(mge_fpath, str), "mge_fpath must be string"
    irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()

    transformer_options = [
        TransformerRule.EXPAND_MUL_ADD3,
        TransformerRule.FUSE_FOR_LEAKY_RELU,
    ]
    transformer = IRTransform(transformer_options)
    transformed_irgraph = transformer.transform(irgraph)

    converter = CaffeConverter(transformed_irgraph, use_empty_blobs)
    converter.convert()

    assert isinstance(prototxt, str) and isinstance(
        caffemodel, str
    ), "'prototxt' and 'caffemodel' must be string"
    converter.dump(prototxt, caffemodel)
normal
{ "blob_id": "a83230e71cc1bcc843d00487746f16114d304eec", "index": 4908, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef mge_to_caffe(mge_fpath, prototxt='out.prototxt', caffemodel=\n 'out.caffemodel', outspec=None, use_empty_blobs=False):\n assert isinstance(mge_fpath, str), 'mge_fpath must be string'\n irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()\n transformer_options = [TransformerRule.EXPAND_MUL_ADD3, TransformerRule\n .FUSE_FOR_LEAKY_RELU]\n transformer = IRTransform(transformer_options)\n transformed_irgraph = transformer.transform(irgraph)\n converter = CaffeConverter(transformed_irgraph, use_empty_blobs)\n converter.convert()\n assert isinstance(prototxt, str) and isinstance(caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)\n", "step-3": "from ..backend.ir_to_caffe import CaffeConverter\nfrom ..converter_ir.ir_transform import IRTransform, TransformerRule\nfrom ..frontend.mge_to_ir import MGE_FrontEnd\n\n\ndef mge_to_caffe(mge_fpath, prototxt='out.prototxt', caffemodel=\n 'out.caffemodel', outspec=None, use_empty_blobs=False):\n assert isinstance(mge_fpath, str), 'mge_fpath must be string'\n irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()\n transformer_options = [TransformerRule.EXPAND_MUL_ADD3, TransformerRule\n .FUSE_FOR_LEAKY_RELU]\n transformer = IRTransform(transformer_options)\n transformed_irgraph = transformer.transform(irgraph)\n converter = CaffeConverter(transformed_irgraph, use_empty_blobs)\n converter.convert()\n assert isinstance(prototxt, str) and isinstance(caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)\n", "step-4": "# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nfrom ..backend.ir_to_caffe import CaffeConverter\nfrom ..converter_ir.ir_transform import IRTransform, TransformerRule\nfrom ..frontend.mge_to_ir import MGE_FrontEnd\n\n\ndef mge_to_caffe(\n mge_fpath,\n prototxt=\"out.prototxt\",\n caffemodel=\"out.caffemodel\",\n outspec=None,\n use_empty_blobs=False,\n):\n assert isinstance(mge_fpath, str), \"mge_fpath must be string\"\n irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()\n\n transformer_options = [\n TransformerRule.EXPAND_MUL_ADD3,\n TransformerRule.FUSE_FOR_LEAKY_RELU,\n ]\n transformer = IRTransform(transformer_options)\n transformed_irgraph = transformer.transform(irgraph)\n\n converter = CaffeConverter(transformed_irgraph, use_empty_blobs)\n converter.convert()\n\n assert isinstance(prototxt, str) and isinstance(\n caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
#!/home/liud/anaconda3/envs/python/bin/python
# -*- coding: utf-8 -*-
'''
    Linear regression
    Formula: W = 1/(xTx) * xT * y
'''
# Imported packages
import numpy as np
from numpy import linalg
from numpy import corrcoef
from sklearn import linear_model
import matplotlib.pyplot as plt

# Load the data
def loadDataSet(filename):
    xList = []
    yList = []
    with open(filename) as fn:
        for i in fn:
            x = i.rstrip().split("\t")
            # x = map(eval, x) -- eval here is easily abused by malicious input
            x = map(eval, x)
            xList.append(x[: -1])
            yList.append(float(x[-1]))
    return xList, yList
'''
def loadDataSet(filename):
    numFeat = len(open(filename).readline().split("\t")) - 1
    dataMat = []
    labelMat= []
    fr = open(filename)
    for line in fr.readlines():
        lineArr = []
        curLine = line.strip().split("\t")
        for i in range(numFeat):
            lineArr.append(float(curLine[i]))
        dataMat.append(lineArr)
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
'''
# Compute the best-fit line, i.e. the model parameters
def standRegress(xList, yList):
    xArr = np.array(xList)
    yArr = np.transpose([yList]) # turn yList into a column vector
    xTx = np.dot(xArr.T, xArr)
    if linalg.det(xTx) == 0: # check whether the matrix is singular
        print "This matrix is singular: its determinant is 0"
        return
    ws = np.dot(np.linalg.inv(xTx), np.dot(xArr.T, yArr))
    return ws

# The sklearn version
def sklearn_standRegress(xList, yList):
    clf = linear_model.LinearRegression(fit_intercept = False) # load the linear regression model and force w0 = 0 (w0 is the intercept)
    clf.fit(xList, yList) # fit
    #print clf.intercept_
    return np.transpose([clf.coef_]) # return the coefficients as a column vector

# Plot the result
def show(xList, yList, w):
    xArr = np.array(xList)
    yArr = np.transpose([yList])
    fig = plt.figure() # create a figure
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(xArr[:, 1:2].flatten(), yArr.flatten())
    xCopy = xArr.copy()
    xCopy.sort(0)
    yPredict = np.dot(xCopy, w)
    #print yPredict.flatten()
    ax.plot(xCopy[:, 1], yPredict.flatten())
    plt.show()

# Main function
def main():
    xList, yList = loadDataSet("/home/liud/PycharmProjects/Machine_Learning/Regression/data/ex0.txt")
    _, n = np.shape(xList)
    ws = np.zeros((n, 1))
    while(1):
        print 'Choose a method (1. sklearn; 2. the linear regression implemented here)'
        selectStyle = raw_input()
        if selectStyle == '1':
            # the sklearn implementation
            ws = sklearn_standRegress(xList, yList)
            break
        elif selectStyle == '2':
            # our own implementation
            ws = standRegress(xList, yList)
            break
        else:
            print 'Invalid input, please try again'
    print "Regression coefficients from least squares: \n", ws
    show(xList, yList, ws)
    yPredict = np.dot(xList, ws)
    print "Correlation:", corrcoef(yPredict.T.tolist(), yList) # corrcoef's two arguments should be of similar types: yList is a plain list, while yPredict is a 2-D numpy.ndarray column vector, hence the .T.tolist()

if __name__ == '__main__':
    main()
    print 'Success'
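# Illustrative sketch added for clarity (not part of the original script): a tiny
# worked check of the normal-equation formula W = (X^T X)^(-1) X^T y from the
# docstring above, on a hypothetical 3-sample design matrix whose first column is
# the intercept term. It is never called by main(); it only demonstrates the formula.
def _normal_equation_demo():
    X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # intercept column + one feature
    y = np.array([1.0, 3.0, 5.0])                       # exactly y = 1 + 2*x
    # Solve W = (X^T X)^(-1) X^T y; expected result: array([[1.], [2.]])
    return np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, np.transpose([y])))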
normal
{ "blob_id": "a6eab1e5e7985de917d707c904fcd90f223c108c", "index": 2559, "step-1": "#!/home/liud/anaconda3/envs/python/bin/python\n# -*- coding: utf-8 -*-\n'''\n\t线性回归\n\t公式:W = 1/(xTx) * xT * y\n'''\n#导入的包\nimport numpy as np\nfrom numpy import linalg\nfrom numpy import corrcoef\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\n\n#加载数据\ndef loadDataSet(filename):\n\txList = []\n\tyList = []\n\twith open(filename) as fn:\n\t\tfor i in fn:\n\t\t\tx = i.rstrip().split(\"\\t\")\n\t\t\t#x = map(eval, x) 此函数eval容易造成恶意输入\n\t\t\tx = map(eval, x)\n\t\t\txList.append(x[: -1])\n\t\t\tyList.append(float(x[-1]))\n\treturn xList, yList\n'''\ndef loadDataSet(filename):\n\tnumFeat = len(open(filename).readline().split(\"\\t\")) - 1\n\tdataMat = []\n\tlabelMat= []\n\tfr = open(filename)\n\tfor line in fr.readlines():\n\t\tlineArr = []\n\t\tcurLine = line.strip().split(\"\\t\")\n\t\tfor i in range(numFeat):\n\t\t\tlineArr.append(float(curLine[i]))\n\t\tdataMat.append(lineArr)\n\t\tlabelMat.append(float(curLine[-1]))\n\treturn dataMat, labelMat\n'''\n#计算最佳拟合直线,得到模型参数\ndef standRegress(xList, yList):\n\txArr = np.array(xList)\n\tyArr = np.transpose([yList]) #将yList转化成列向量\n\txTx = np.dot(xArr.T, xArr)\n\tif linalg.det(xTx) == 0: #判断是否为非奇异矩阵\n\t\tprint \"这个矩阵是奇异矩阵,行列式为0\"\n\t\treturn\n\tws = np.dot(np.linalg.inv(xTx), np.dot(xArr.T, yArr))\n\treturn ws\n\n#sklearn的写法\ndef sklearn_standRegress(xList, yList):\n\tclf = linear_model.LinearRegression(fit_intercept = False) #加载线性回归模型,且让w0 = 0(w0指的是intercept)\n\tclf.fit(xList, yList) #拟合\n\t#print clf.intercept_\n\treturn np.transpose([clf.coef_]) #返回系数的列向量形式\n\n#展示结果\ndef show(xList, yList, w):\n\txArr = np.array(xList)\n\tyArr = np.transpose([yList])\n\tfig = plt.figure() #创建一幅图\n\tax = fig.add_subplot(1, 1, 1)\n\tax.scatter(xArr[:, 1:2].flatten(), yArr.flatten())\n\txCopy = xArr.copy()\n\txCopy.sort(0)\n\tyPredict = np.dot(xCopy, w)\n\t#print yPredict.flatten()\n\tax.plot(xCopy[:, 1], yPredict.flatten())\n\tplt.show()\n\n#主函数\ndef main():\n\txList, yList = loadDataSet(\"/home/liud/PycharmProjects/Machine_Learning/Regression/data/ex0.txt\")\n\t_, n = np.shape(xList)\n\tws = np.zeros((n, 1))\n\twhile(1):\n\t\tprint '请输入你选择的方式(1.sklearn;2.regression自己实现的线性回归)'\n\t\tselectStyle = raw_input()\n\t\tif selectStyle == '1':\n\t\t\t# sklearn的实现\n\t\t\tws = sklearn_standRegress(xList, yList)\n\t\t\tbreak\n\t\telif selectStyle == '2':\n\t\t\t# 自己按理解实现\n\t\t\tws = standRegress(xList, yList)\n\t\t\tbreak\n\t\telse:\n\t\t\tprint '错误输入,请重新输入'\n\tprint \"最小二乘法得出的回归系数: \\n\", ws\n\tshow(xList, yList, ws)\n\tyPredict = np.dot(xList, ws)\n\tprint \"相关性:\", corrcoef(yPredict.T.tolist(), yList) #corrcoef中的两个参数尽可能的类型相似,yList是list,因此yPredict是numpy.ndarray且为二维的列向量。\n\nif __name__ == '__main__':\n\tmain()\n\tprint 'Success'", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
import spacy
from vaderSentiment import vaderSentiment
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route('/')
def hello():
    return render_template('index.html')


@app.route('/', methods=['POST'])
def func():
    # Read the submitted review; show the form again if it is empty.
    st = request.form["review"]
    if st == '':
        return render_template('index.html')
    # Split the text into sentences with spaCy, then score each sentence with VADER.
    english = spacy.load("en_core_web_sm")
    result = english(st)
    sentences = [str(s) for s in result.sents]
    analyzer = vaderSentiment.SentimentIntensityAnalyzer()
    sentiment = [analyzer.polarity_scores(str(s)) for s in sentences]

    # Classify from the first sentence's compound score, using VADER's usual
    # thresholds (>= 0.05 positive, <= -0.05 negative); the emoji values are
    # decimal Unicode code points rendered by the template.
    if sentiment[0]['compound'] >= 0.05:
        sent = "Positive "
        emoji = 128512  # U+1F600, grinning face
        address = ' https://st.depositphotos.com/1016482/2236/i/950/depositphotos_22362437-stock-photo-background-with-heap-of-yellow.jpg'
    elif sentiment[0]['compound'] <= -0.05:
        sent = "Negative "
        emoji = 128577  # U+1F641, frowning face
        address = 'https://www.ecopetit.cat/wpic/mpic/270-2706765_sad-emoji-cover-photo-for-fb.jpg '
    else:
        sent = "Neutral "
        emoji = 128528  # U+1F610, neutral face
        address = 'https://atlas-content-cdn.pixelsquid.com/stock-images/neutral-face-facial-expression-L63Mrq1-600.jpg '

    return render_template('output.html', sentence=st, sent=sent, emoji=emoji, address=address)


@app.route('/fu.html')
def result():
    return render_template('fu.html')


@app.route('/new.html')
def new():
    return render_template('new.html')


if __name__ == '__main__':
    app.run(debug=True)
normal
{ "blob_id": "2d7f7cb66480ecb8335949687854554679026959", "index": 9988, "step-1": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef func():\n st = request.form['review']\n if st == '':\n return render_template('index.html')\n english = spacy.load('en_core_web_sm')\n result = english(st)\n sentences = [str(s) for s in result.sents]\n analyzer = vaderSentiment.SentimentIntensityAnalyzer()\n sentiment = [analyzer.polarity_scores(str(s)) for s in sentences]\n if sentiment[0]['compound'] >= 0.05:\n sent = 'Positive '\n emoji = 128512\n address = (\n ' https://st.depositphotos.com/1016482/2236/i/950/depositphotos_22362437-stock-photo-background-with-heap-of-yellow.jpg'\n )\n elif sentiment[0]['compound'] <= -0.05:\n sent = 'Negative '\n emoji = 128577\n address = (\n 'https://www.ecopetit.cat/wpic/mpic/270-2706765_sad-emoji-cover-photo-for-fb.jpg '\n )\n else:\n sent = 'Neutral '\n emoji = 128528\n address = (\n 'https://atlas-content-cdn.pixelsquid.com/stock-images/neutral-face-facial-expression-L63Mrq1-600.jpg '\n )\n return render_template('output.html', sentence=st, sent=sent, emoji=\n emoji, address=address)\n\n\n@app.route('/fu.html')\ndef result():\n return render_template('fu.html')\n\n\n@app.route('/new.html')\ndef new():\n return render_template('new.html')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef func():\n st = request.form['review']\n if st == '':\n return render_template('index.html')\n english = spacy.load('en_core_web_sm')\n result = english(st)\n sentences = [str(s) for s in result.sents]\n analyzer = vaderSentiment.SentimentIntensityAnalyzer()\n sentiment = [analyzer.polarity_scores(str(s)) for s in sentences]\n if sentiment[0]['compound'] >= 0.05:\n sent = 'Positive '\n emoji = 128512\n address = (\n ' https://st.depositphotos.com/1016482/2236/i/950/depositphotos_22362437-stock-photo-background-with-heap-of-yellow.jpg'\n )\n elif sentiment[0]['compound'] <= -0.05:\n sent = 'Negative '\n emoji = 128577\n address = (\n 'https://www.ecopetit.cat/wpic/mpic/270-2706765_sad-emoji-cover-photo-for-fb.jpg '\n )\n else:\n sent = 'Neutral '\n emoji = 128528\n address = (\n 'https://atlas-content-cdn.pixelsquid.com/stock-images/neutral-face-facial-expression-L63Mrq1-600.jpg '\n )\n return render_template('output.html', sentence=st, sent=sent, emoji=\n emoji, address=address)\n\n\n@app.route('/fu.html')\ndef result():\n return render_template('fu.html')\n\n\n@app.route('/new.html')\ndef new():\n return render_template('new.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef func():\n st = request.form['review']\n if st == '':\n return render_template('index.html')\n english = spacy.load('en_core_web_sm')\n result = english(st)\n sentences = [str(s) for s in result.sents]\n analyzer = vaderSentiment.SentimentIntensityAnalyzer()\n sentiment = [analyzer.polarity_scores(str(s)) for s in sentences]\n if sentiment[0]['compound'] >= 0.05:\n sent = 'Positive '\n emoji = 128512\n address = (\n ' https://st.depositphotos.com/1016482/2236/i/950/depositphotos_22362437-stock-photo-background-with-heap-of-yellow.jpg'\n )\n elif sentiment[0]['compound'] <= -0.05:\n sent = 'Negative '\n emoji = 128577\n address = (\n 
'https://www.ecopetit.cat/wpic/mpic/270-2706765_sad-emoji-cover-photo-for-fb.jpg '\n )\n else:\n sent = 'Neutral '\n emoji = 128528\n address = (\n 'https://atlas-content-cdn.pixelsquid.com/stock-images/neutral-face-facial-expression-L63Mrq1-600.jpg '\n )\n return render_template('output.html', sentence=st, sent=sent, emoji=\n emoji, address=address)\n\n\n@app.route('/fu.html')\ndef result():\n return render_template('fu.html')\n\n\n@app.route('/new.html')\ndef new():\n return render_template('new.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "import spacy\nfrom vaderSentiment import vaderSentiment\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef func():\n st = request.form['review']\n if st == '':\n return render_template('index.html')\n english = spacy.load('en_core_web_sm')\n result = english(st)\n sentences = [str(s) for s in result.sents]\n analyzer = vaderSentiment.SentimentIntensityAnalyzer()\n sentiment = [analyzer.polarity_scores(str(s)) for s in sentences]\n if sentiment[0]['compound'] >= 0.05:\n sent = 'Positive '\n emoji = 128512\n address = (\n ' https://st.depositphotos.com/1016482/2236/i/950/depositphotos_22362437-stock-photo-background-with-heap-of-yellow.jpg'\n )\n elif sentiment[0]['compound'] <= -0.05:\n sent = 'Negative '\n emoji = 128577\n address = (\n 'https://www.ecopetit.cat/wpic/mpic/270-2706765_sad-emoji-cover-photo-for-fb.jpg '\n )\n else:\n sent = 'Neutral '\n emoji = 128528\n address = (\n 'https://atlas-content-cdn.pixelsquid.com/stock-images/neutral-face-facial-expression-L63Mrq1-600.jpg '\n )\n return render_template('output.html', sentence=st, sent=sent, emoji=\n emoji, address=address)\n\n\n@app.route('/fu.html')\ndef result():\n return render_template('fu.html')\n\n\n@app.route('/new.html')\ndef new():\n return render_template('new.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "import spacy\nfrom vaderSentiment import vaderSentiment\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n \n@app.route('/',methods=['POST'])\ndef func():\n st=request.form[\"review\"]\n if(st==''):\n return render_template('index.html')\n english = spacy.load(\"en_core_web_sm\")\n result = english(st)\n sentences = [str(s) for s in result.sents]\n analyzer = vaderSentiment.SentimentIntensityAnalyzer()\n sentiment = [analyzer.polarity_scores(str(s)) for s in sentences]\n \n \n if(sentiment[0]['compound'] >= 0.05) : \n sent=\"Positive \" \n emoji=128512\n address=' https://st.depositphotos.com/1016482/2236/i/950/depositphotos_22362437-stock-photo-background-with-heap-of-yellow.jpg'\n \n elif(sentiment[0]['compound'] <= - 0.05) : \n sent=\"Negative \"\n emoji=128577\n address='https://www.ecopetit.cat/wpic/mpic/270-2706765_sad-emoji-cover-photo-for-fb.jpg '\n \n else :\n sent=\"Neutral \"\n emoji=128528\n address='https://atlas-content-cdn.pixelsquid.com/stock-images/neutral-face-facial-expression-L63Mrq1-600.jpg '\n \n \n return render_template('output.html', sentence=st, sent=sent, emoji=emoji, address=address)\n \n\n@app.route('/fu.html')\ndef result():\n return render_template('fu.html')\n\n@app.route('/new.html')\ndef new():\n return render_template('new.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
# -*- coding: utf-8 -*- """ Created on Tue Dec 31 05:48:57 2019 @author: emama """ import datetime as dt t = dt.datetime.today() print(t)
normal
{ "blob_id": "b1fbc8f3616b70e5d35898fd895c37e838c87dc9", "index": 9293, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(t)\n", "step-3": "<mask token>\nt = dt.datetime.today()\nprint(t)\n", "step-4": "<mask token>\nimport datetime as dt\nt = dt.datetime.today()\nprint(t)\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec 31 05:48:57 2019\r\n\r\n@author: emama\r\n\"\"\"\r\n\r\nimport datetime as dt\r\n\r\nt = dt.datetime.today()\r\nprint(t)", "step-ids": [ 0, 1, 2, 3, 4 ] }
print("hello world") print("lol") print("new changes in vis")
normal
{ "blob_id": "6c88e55a76cbd84cee0ebd6c51d930cc2da100d2", "index": 2945, "step-1": "<mask token>\n", "step-2": "print('hello world')\nprint('lol')\nprint('new changes in vis')\n", "step-3": "print(\"hello world\")\nprint(\"lol\")\nprint(\"new changes in vis\")", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
import pytest from apistar import App, Route, TestClient, exceptions from apistar_request_id import RequestId, RequestIdHooks def index() -> dict: return {} def fail() -> dict: raise exceptions.BadRequest("fail") def fail_2() -> dict: raise RuntimeError("fail") routes = [ Route("/", method="GET", handler=index), Route("/fail", method="GET", handler=fail), Route("/fail-2", method="GET", handler=fail_2), ] event_hooks = [ RequestIdHooks() ] @pytest.fixture(scope="session") def app(): return App(routes=routes, event_hooks=event_hooks) @pytest.fixture def client(app): return TestClient(app) def test_request_id_can_be_autogenerated(client): # Given that I don't have an existing request id # When I make a request to the app response = client.get("/") # Then my response should contain an autogenerated request id assert response.headers["x-request-id"] # And the request id for the current thread should be cleared assert RequestId.get_request_id() is None def test_request_id_can_be_set_from_request_headers(client): # Given that I have an existing request id # When I make a request to the app response = client.get("/", headers={"x-request-id": "a-request-id"}) # Then that same request id should appear in the response headers assert response.headers["x-request-id"] == "a-request-id" # And the request id for the current thread should be cleared assert RequestId.get_request_id() is None def test_request_id_can_be_set_on_error(client): # Given that I have an existing request id # When I make a request to the app response = client.get("/fail", headers={"x-request-id": "a-request-id"}) # Then that same request id should appear in the response headers assert response.headers["x-request-id"] == "a-request-id" # And the request id for the current thread should be cleared assert RequestId.get_request_id() is None def test_request_id_can_be_set_on_internal_error(client): # Given that I have an existing request id # When I make a request to the app with pytest.raises(RuntimeError): client.get("/fail-2", headers={"x-request-id": "a-request-id"}) # Then the request id should be set and subsequently cleared assert RequestId.get_request_id() is None
normal
{ "blob_id": "f41ab6813fb7067089abe223b9006adde40630cd", "index": 1941, "step-1": "<mask token>\n\n\ndef index() ->dict:\n return {}\n\n\n<mask token>\n\n\n@pytest.fixture\ndef client(app):\n return TestClient(app)\n\n\ndef test_request_id_can_be_autogenerated(client):\n response = client.get('/')\n assert response.headers['x-request-id']\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_from_request_headers(client):\n response = client.get('/', headers={'x-request-id': 'a-request-id'})\n assert response.headers['x-request-id'] == 'a-request-id'\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_on_error(client):\n response = client.get('/fail', headers={'x-request-id': 'a-request-id'})\n assert response.headers['x-request-id'] == 'a-request-id'\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_on_internal_error(client):\n with pytest.raises(RuntimeError):\n client.get('/fail-2', headers={'x-request-id': 'a-request-id'})\n assert RequestId.get_request_id() is None\n", "step-2": "<mask token>\n\n\ndef index() ->dict:\n return {}\n\n\ndef fail() ->dict:\n raise exceptions.BadRequest('fail')\n\n\ndef fail_2() ->dict:\n raise RuntimeError('fail')\n\n\n<mask token>\n\n\n@pytest.fixture(scope='session')\ndef app():\n return App(routes=routes, event_hooks=event_hooks)\n\n\n@pytest.fixture\ndef client(app):\n return TestClient(app)\n\n\ndef test_request_id_can_be_autogenerated(client):\n response = client.get('/')\n assert response.headers['x-request-id']\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_from_request_headers(client):\n response = client.get('/', headers={'x-request-id': 'a-request-id'})\n assert response.headers['x-request-id'] == 'a-request-id'\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_on_error(client):\n response = client.get('/fail', headers={'x-request-id': 'a-request-id'})\n assert response.headers['x-request-id'] == 'a-request-id'\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_on_internal_error(client):\n with pytest.raises(RuntimeError):\n client.get('/fail-2', headers={'x-request-id': 'a-request-id'})\n assert RequestId.get_request_id() is None\n", "step-3": "<mask token>\n\n\ndef index() ->dict:\n return {}\n\n\ndef fail() ->dict:\n raise exceptions.BadRequest('fail')\n\n\ndef fail_2() ->dict:\n raise RuntimeError('fail')\n\n\nroutes = [Route('/', method='GET', handler=index), Route('/fail', method=\n 'GET', handler=fail), Route('/fail-2', method='GET', handler=fail_2)]\nevent_hooks = [RequestIdHooks()]\n\n\n@pytest.fixture(scope='session')\ndef app():\n return App(routes=routes, event_hooks=event_hooks)\n\n\n@pytest.fixture\ndef client(app):\n return TestClient(app)\n\n\ndef test_request_id_can_be_autogenerated(client):\n response = client.get('/')\n assert response.headers['x-request-id']\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_from_request_headers(client):\n response = client.get('/', headers={'x-request-id': 'a-request-id'})\n assert response.headers['x-request-id'] == 'a-request-id'\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_on_error(client):\n response = client.get('/fail', headers={'x-request-id': 'a-request-id'})\n assert response.headers['x-request-id'] == 'a-request-id'\n assert RequestId.get_request_id() is None\n\n\ndef test_request_id_can_be_set_on_internal_error(client):\n with 
pytest.raises(RuntimeError):
        client.get('/fail-2', headers={'x-request-id': 'a-request-id'})
    assert RequestId.get_request_id() is None

step-4:
import pytest
from apistar import App, Route, TestClient, exceptions
from apistar_request_id import RequestId, RequestIdHooks


def index() ->dict:
    return {}


def fail() ->dict:
    raise exceptions.BadRequest('fail')


def fail_2() ->dict:
    raise RuntimeError('fail')


routes = [Route('/', method='GET', handler=index), Route('/fail', method=
    'GET', handler=fail), Route('/fail-2', method='GET', handler=fail_2)]
event_hooks = [RequestIdHooks()]


@pytest.fixture(scope='session')
def app():
    return App(routes=routes, event_hooks=event_hooks)


@pytest.fixture
def client(app):
    return TestClient(app)


def test_request_id_can_be_autogenerated(client):
    response = client.get('/')
    assert response.headers['x-request-id']
    assert RequestId.get_request_id() is None


def test_request_id_can_be_set_from_request_headers(client):
    response = client.get('/', headers={'x-request-id': 'a-request-id'})
    assert response.headers['x-request-id'] == 'a-request-id'
    assert RequestId.get_request_id() is None


def test_request_id_can_be_set_on_error(client):
    response = client.get('/fail', headers={'x-request-id': 'a-request-id'})
    assert response.headers['x-request-id'] == 'a-request-id'
    assert RequestId.get_request_id() is None


def test_request_id_can_be_set_on_internal_error(client):
    with pytest.raises(RuntimeError):
        client.get('/fail-2', headers={'x-request-id': 'a-request-id'})
    assert RequestId.get_request_id() is None

step-5:
import pytest

from apistar import App, Route, TestClient, exceptions
from apistar_request_id import RequestId, RequestIdHooks


def index() -> dict:
    return {}


def fail() -> dict:
    raise exceptions.BadRequest("fail")


def fail_2() -> dict:
    raise RuntimeError("fail")


routes = [
    Route("/", method="GET", handler=index),
    Route("/fail", method="GET", handler=fail),
    Route("/fail-2", method="GET", handler=fail_2),
]

event_hooks = [
    RequestIdHooks()
]


@pytest.fixture(scope="session")
def app():
    return App(routes=routes, event_hooks=event_hooks)


@pytest.fixture
def client(app):
    return TestClient(app)


def test_request_id_can_be_autogenerated(client):
    # Given that I don't have an existing request id
    # When I make a request to the app
    response = client.get("/")

    # Then my response should contain an autogenerated request id
    assert response.headers["x-request-id"]

    # And the request id for the current thread should be cleared
    assert RequestId.get_request_id() is None


def test_request_id_can_be_set_from_request_headers(client):
    # Given that I have an existing request id
    # When I make a request to the app
    response = client.get("/", headers={"x-request-id": "a-request-id"})

    # Then that same request id should appear in the response headers
    assert response.headers["x-request-id"] == "a-request-id"

    # And the request id for the current thread should be cleared
    assert RequestId.get_request_id() is None


def test_request_id_can_be_set_on_error(client):
    # Given that I have an existing request id
    # When I make a request to the app
    response = client.get("/fail", headers={"x-request-id": "a-request-id"})

    # Then that same request id should appear in the response headers
    assert response.headers["x-request-id"] == "a-request-id"

    # And the request id for the current thread should be cleared
    assert RequestId.get_request_id() is None


def test_request_id_can_be_set_on_internal_error(client):
    # Given that I have an existing request id
    # When I make a request to the app
    with pytest.raises(RuntimeError):
        client.get("/fail-2", headers={"x-request-id": "a-request-id"})

    # Then the request id should be set and subsequently cleared
    assert RequestId.get_request_id() is None

step-ids: [6, 9, 10, 11, 12]
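Every test above asserts that the id is cleared once the response has been returned; while a request is still being handled, RequestId.get_request_id() is what a handler would call to read it. A minimal sketch of such a handler (whoami is a hypothetical addition, assuming RequestIdHooks populates the id before handlers run):

# Hypothetical handler for the app above; assumes RequestIdHooks has
# already stored the id for the current request when the handler runs.
def whoami() -> dict:
    return {'request_id': RequestId.get_request_id()}


# Wiring it in would be one more route (also hypothetical):
# routes.append(Route('/whoami', method='GET', handler=whoami))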
step-1:
<mask token>

step-2:
<mask token>
s.connect((HOST, PORT))

step-3:
<mask token>
HOST = '127.0.0.1'
PORT = 4444
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))

step-4:
import socket
HOST = '127.0.0.1'
PORT = 4444
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))

step-5:
#!/bin/python3
import socket
HOST = '127.0.0.1'
PORT = 4444

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))

order_type: flexible
blob_id: 14a39b9aa56777c8198794fe2f51c9a068500743
index: 4075
step-ids: [0, 1, 2, 3, 4]
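step-5 above connects to 127.0.0.1:4444 and stops there: nothing is sent, received, or closed. A minimal sketch of how such a client is typically finished (the payload and the context-manager cleanup are assumptions, not part of the original):

#!/usr/bin/env python3
import socket

HOST = '127.0.0.1'
PORT = 4444

# The context manager guarantees the socket is closed on exit; the
# payload below is a hypothetical example, since the original sends nothing.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(b'hello\n')
    reply = s.recv(1024)  # read up to 1 KiB from the peer
    print('received:', reply)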
step-1:
<mask token>

step-2:
<mask token>


class TGAbstractRegistry(ABC):
    <mask token>

step-3:
<mask token>


class TGAbstractRegistry(ABC):

    def __init__(self):
        self.rule_engine = TGLoggingRuleEngineFactory().create()
        self.logger = logging.getLogger()
        self.event_distributor = TGEventDistributor(logging.getLogger())
        self.handler_map_factory = TGHandlerMapFactory().create()

step-4:
import logging
from abc import ABC
from thraxisgamespatterns.application.handler_map_factory import TGHandlerMapFactory
from thraxisgamespatterns.eventhandling.event_distributor import TGEventDistributor
from thraxisgamespatterns.factories.logging_rule_engine_factory import TGLoggingRuleEngineFactory


class TGAbstractRegistry(ABC):

    def __init__(self):
        self.rule_engine = TGLoggingRuleEngineFactory().create()
        self.logger = logging.getLogger()
        self.event_distributor = TGEventDistributor(logging.getLogger())
        self.handler_map_factory = TGHandlerMapFactory().create()

order_type: flexible
blob_id: d499b4e189a0c3c6efa6a07871dbc6c2996a2dcb
index: 2245
step-ids: [0, 1, 2, 3]
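TGAbstractRegistry declares no abstract methods, so a concrete registry only needs to run the base __init__ wiring; a minimal sketch (TGApplicationRegistry is a hypothetical name, not part of the thraxisgamespatterns package):

# Hypothetical concrete registry built on the base class above.
class TGApplicationRegistry(TGAbstractRegistry):
    def __init__(self):
        super().__init__()  # wires rule engine, logger, event distributor, handler map
        self.logger.info('application registry initialized')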
step-1:
<mask token>

step-2:
<mask token>
admin.site.register(Post)

step-3:
from django.contrib import admin
from trips.models import Post
admin.site.register(Post)

order_type: flexible
blob_id: a8197a4f0bb84e734696bf43fa976c76732d75b8
index: 9863
step-ids: [0, 1, 2]
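For comparison, step-3's registration is often written in the decorator form with a ModelAdmin; a minimal sketch (PostAdmin and its options are illustrative assumptions, since Post's fields are not shown here):

from django.contrib import admin
from trips.models import Post


# Hypothetical ModelAdmin; '__str__' is the safe default column because
# Post's concrete fields do not appear in the original snippet.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    list_display = ('__str__',)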
step-1:
<mask token>

step-2:
config_info = {'n_input': 1, 'num_layers': 1, 'features': 20,
    'sequence_length': 1344, 'num_steps': None, 'lstm_size': None,
    'batch_size': None, 'init_learning_rate': None, 'learning_rate_decay':
    None, 'init_epoch': None, 'max_epoch': None, 'dropout_rate': None}

order_type: normal
blob_id: 8ede786526f4b730173777d9d3b9c7e4554fc887
index: 2443
step-ids: [0, 1]
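config_info fixes the input dimensionality and sequence length but leaves every tunable LSTM hyperparameter as a None placeholder; a minimal sketch of filling and validating it before use (all chosen values are illustrative assumptions, not from the original):

# Fill the None placeholders before handing the config to a trainer;
# every value below is an illustrative assumption.
config = dict(config_info)
config.update({
    'num_steps': 30,
    'lstm_size': 128,
    'batch_size': 64,
    'init_learning_rate': 0.001,
    'learning_rate_decay': 0.99,
    'init_epoch': 5,
    'max_epoch': 50,
    'dropout_rate': 0.2,
})

# Guard against anything still unset.
missing = [key for key, value in config.items() if value is None]
assert not missing, f'unset hyperparameters: {missing}'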