| code (stringlengths 13 to 6.09M) | order_type (stringclasses, 2 values) | original_example (dict) | step_ids (listlengths 1 to 5) |
|---|---|---|---|
import sys
input = sys.stdin.readline
from collections import deque
size, num = map(int, input().split())
position = list(map(int, input().split()))
cnt = 0
nums = []
for k in range(1, size + 1):
nums.append(k)
size = deque(nums)
position = deque(position)
while position != deque([]):
if position[0] == 1:
size.popleft()
position.popleft()
for i in range(len(position)):
position[i] -= 1
else:
right = 0
left = 0
if position[0] <= (len(size) + 2) // 2:
size.rotate(-1)
cnt += 1
for i in range(len(position)):
position[i] -= 1
if position[i] <= 0:
position[i] = len(size)
else:
size.rotate(1)
cnt += 1
for i in range(len(position)):
position[i] += 1
if position[i] > len(size):
position[i] = 1
print(cnt)
|
normal
|
{
"blob_id": "c0c0ed31a09f2b49448bc1f3519aa61daaba20af",
"index": 5023,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor k in range(1, size + 1):\n nums.append(k)\n<mask token>\nwhile position != deque([]):\n if position[0] == 1:\n size.popleft()\n position.popleft()\n for i in range(len(position)):\n position[i] -= 1\n else:\n right = 0\n left = 0\n if position[0] <= (len(size) + 2) // 2:\n size.rotate(-1)\n cnt += 1\n for i in range(len(position)):\n position[i] -= 1\n if position[i] <= 0:\n position[i] = len(size)\n else:\n size.rotate(1)\n cnt += 1\n for i in range(len(position)):\n position[i] += 1\n if position[i] > len(size):\n position[i] = 1\nprint(cnt)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\n<mask token>\nsize, num = map(int, input().split())\nposition = list(map(int, input().split()))\ncnt = 0\nnums = []\nfor k in range(1, size + 1):\n nums.append(k)\nsize = deque(nums)\nposition = deque(position)\nwhile position != deque([]):\n if position[0] == 1:\n size.popleft()\n position.popleft()\n for i in range(len(position)):\n position[i] -= 1\n else:\n right = 0\n left = 0\n if position[0] <= (len(size) + 2) // 2:\n size.rotate(-1)\n cnt += 1\n for i in range(len(position)):\n position[i] -= 1\n if position[i] <= 0:\n position[i] = len(size)\n else:\n size.rotate(1)\n cnt += 1\n for i in range(len(position)):\n position[i] += 1\n if position[i] > len(size):\n position[i] = 1\nprint(cnt)\n",
"step-4": "import sys\ninput = sys.stdin.readline\nfrom collections import deque\nsize, num = map(int, input().split())\nposition = list(map(int, input().split()))\ncnt = 0\nnums = []\nfor k in range(1, size + 1):\n nums.append(k)\nsize = deque(nums)\nposition = deque(position)\nwhile position != deque([]):\n if position[0] == 1:\n size.popleft()\n position.popleft()\n for i in range(len(position)):\n position[i] -= 1\n else:\n right = 0\n left = 0\n if position[0] <= (len(size) + 2) // 2:\n size.rotate(-1)\n cnt += 1\n for i in range(len(position)):\n position[i] -= 1\n if position[i] <= 0:\n position[i] = len(size)\n else:\n size.rotate(1)\n cnt += 1\n for i in range(len(position)):\n position[i] += 1\n if position[i] > len(size):\n position[i] = 1\nprint(cnt)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
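The first row solves a rotating-deque puzzle (BOJ 1021 style): pop each target from the front, rotating left or right, whichever is cheaper, and count the rotations. A minimal self-contained sketch of the same idea, with the stdin handling replaced by a hypothetical function interface:

from collections import deque

def min_rotations(n, targets):
    """Count the left/right rotations needed to pop each target from the front."""
    q = deque(range(1, n + 1))
    cnt = 0
    for t in targets:
        i = q.index(t)
        if i <= len(q) // 2:
            q.rotate(-i)   # rotating left i times is cheaper
        else:
            i = len(q) - i
            q.rotate(i)    # rotating right len(q) - i times is cheaper
        cnt += i
        q.popleft()
    return cnt

print(min_rotations(10, [1, 2, 3]))  # 0: targets are already in front order
print(min_rotations(10, [2, 9, 5]))  # 8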
# -*- coding: utf-8 -*-
from scrapy import Request
from ..items import ZhilianSpiderItem
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
class ZhilianSpider(RedisCrawlSpider):
name = 'zhilianspider'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
rules = [
Rule(LinkExtractor(restrict_xpaths='/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'), follow=True),
Rule(LinkExtractor(allow=r'http://jobs.zhaopin.com/(\d.+).htm'), callback='parse_zhilian')
]
def start_requests(self):
url = 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
yield Request(url, headers=self.headers)
def parse_zhilian(self, response):
_ = self
item = ZhilianSpiderItem()
item['job_id'] = response.url
item['job_name'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()
item['job_company'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()
item['job_salary'] = response.xpath('/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first().strip()
item['job_education'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())
item['job_address'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()).strip()
item['job_category'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())
item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
if not item['job_description']:
item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
text = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
if text:
item['company_profile'] = text
if item['company_profile'] == '':
item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').extract()).replace(',', ',').replace('\r\n', '').strip()
else:
item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').extract()).replace(',', ',').replace('\r\n', '').strip()
yield item
|
normal
|
{
"blob_id": "894fa01e16d200add20f614fd4a5ee9071777db9",
"index": 3339,
"step-1": "<mask token>\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n item['job_id'] = response.url\n item['job_name'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n item['job_company'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n item['job_salary'] = response.xpath(\n '/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(\n ).strip()\n item['job_education'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n item['job_address'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()\n ).strip()\n item['job_category'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(\n 'string(.)').extract()).replace(',', ',').replace('\\r\\n', ''\n ).strip()\n if text:\n item['company_profile'] = text\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n yield item\n",
"step-3": "<mask token>\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n name = 'zhilianspider'\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n }\n rules = [Rule(LinkExtractor(restrict_xpaths=\n '/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'\n ), follow=True), Rule(LinkExtractor(allow=\n 'http://jobs.zhaopin.com/(\\\\d.+).htm'), callback='parse_zhilian')]\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n item['job_id'] = response.url\n item['job_name'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n item['job_company'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n item['job_salary'] = response.xpath(\n '/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(\n ).strip()\n item['job_education'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n item['job_address'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()\n ).strip()\n item['job_category'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(\n 'string(.)').extract()).replace(',', ',').replace('\\r\\n', ''\n ).strip()\n if text:\n item['company_profile'] = text\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n yield item\n",
"step-4": "from scrapy import Request\nfrom ..items import ZhilianSpiderItem\nfrom scrapy.spiders import Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n name = 'zhilianspider'\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n }\n rules = [Rule(LinkExtractor(restrict_xpaths=\n '/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'\n ), follow=True), Rule(LinkExtractor(allow=\n 'http://jobs.zhaopin.com/(\\\\d.+).htm'), callback='parse_zhilian')]\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n item['job_id'] = response.url\n item['job_name'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n item['job_company'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n item['job_salary'] = response.xpath(\n '/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(\n ).strip()\n item['job_education'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n item['job_address'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()\n ).strip()\n item['job_category'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(\n 'string(.)').extract()).replace(',', ',').replace('\\r\\n', ''\n ).strip()\n if text:\n item['company_profile'] = text\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom scrapy import Request\nfrom ..items import ZhilianSpiderItem\nfrom scrapy.spiders import Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n name = 'zhilianspider'\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n }\n\n rules = [\n Rule(LinkExtractor(restrict_xpaths='/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'), follow=True),\n Rule(LinkExtractor(allow=r'http://jobs.zhaopin.com/(\\d.+).htm'), callback='parse_zhilian')\n ]\n\n def start_requests(self):\n url = 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n\n item['job_id'] = response.url\n\n item['job_name'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n\n item['job_company'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n\n item['job_salary'] = response.xpath('/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first().strip()\n\n item['job_education'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n\n item['job_address'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()).strip()\n\n item['job_category'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n\n item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath('string(.)').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n\n if text:\n item['company_profile'] = text\n\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n\n yield item\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
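The spider above imports ZhilianSpiderItem from a sibling items module that is not part of this row. A plausible definition, assuming one plain scrapy.Field per key that parse_zhilian fills (the field list is inferred from the parser, not taken from the source):

import scrapy

class ZhilianSpiderItem(scrapy.Item):
    # hypothetical reconstruction: one Field per key assigned in parse_zhilian
    job_id = scrapy.Field()
    job_name = scrapy.Field()
    job_company = scrapy.Field()
    job_salary = scrapy.Field()
    job_education = scrapy.Field()
    job_address = scrapy.Field()
    job_category = scrapy.Field()
    job_description = scrapy.Field()
    company_profile = scrapy.Field()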
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('cart', '0010_auto_20200518_1718')]
operations = [migrations.AlterField(model_name='order', name=
'fianl_code', field=models.PositiveIntegerField(blank=True, null=True))
]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('cart', '0010_auto_20200518_1718')]
operations = [migrations.AlterField(model_name='order', name=
'fianl_code', field=models.PositiveIntegerField(blank=True, null=True))
]
<|reserved_special_token_1|>
# Generated by Django 3.0.5 on 2020-05-18 12:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cart', '0010_auto_20200518_1718'),
]
operations = [
migrations.AlterField(
model_name='order',
name='fianl_code',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
|
flexible
|
{
"blob_id": "da783355c5f888a66f623fa7eeeaf0e4e9fcfa48",
"index": 4982,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cart', '0010_auto_20200518_1718')]\n operations = [migrations.AlterField(model_name='order', name=\n 'fianl_code', field=models.PositiveIntegerField(blank=True, null=True))\n ]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cart', '0010_auto_20200518_1718')]\n operations = [migrations.AlterField(model_name='order', name=\n 'fianl_code', field=models.PositiveIntegerField(blank=True, null=True))\n ]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-05-18 12:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cart', '0010_auto_20200518_1718'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='fianl_code',\n field=models.PositiveIntegerField(blank=True, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
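This row is a Django migration that relaxes Order.fianl_code (the misspelling of "final" is in the source) to a nullable PositiveIntegerField. After it is applied, the corresponding model field would read roughly as below; the surrounding model is assumed, not shown in the row:

from django.db import models

class Order(models.Model):
    # other fields omitted; 'fianl_code' spelling kept to match the migration
    fianl_code = models.PositiveIntegerField(blank=True, null=True)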
{% load code_generator_tags %}from rest_framework.serializers import ModelSerializer
{% from_module_import app.name|add:'.models' models %}{% comment %}
{% endcomment %}{% for model in models %}
class {{ model.name }}Serializer(ModelSerializer):
class Meta:
model = {{ model.name }}
depth = 1
fields = (
{% indent_items model.field_names 12 quote='simple' %}
)
read_only_fields = (){% comment %}
{% endcomment %}{% endfor %}
|
normal
|
{
"blob_id": "888ec915d89f1fd8fd6465f1035f7c658af78596",
"index": 6166,
"step-1": "{% load code_generator_tags %}from rest_framework.serializers import ModelSerializer\n{% from_module_import app.name|add:'.models' models %}{% comment %}\n{% endcomment %}{% for model in models %}\n\n\nclass {{ model.name }}Serializer(ModelSerializer):\n class Meta:\n model = {{ model.name }}\n depth = 1\n fields = (\n {% indent_items model.field_names 12 quote='simple' %}\n )\n read_only_fields = (){% comment %}\n{% endcomment %}{% endfor %}\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
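The template row generates one Django REST Framework serializer per model. Rendered against a hypothetical model named Article with fields id, title and body (placeholders, not in the source), the output would look roughly like:

from rest_framework.serializers import ModelSerializer


class ArticleSerializer(ModelSerializer):
    class Meta:
        model = Article  # Article is a placeholder model
        depth = 1
        fields = (
            'id',
            'title',
            'body',
        )
        read_only_fields = ()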
<|reserved_special_token_0|>
class MIPCLSolver(Solver):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def extracHistory(self, line: str):
""" Extract the sequence of primal bounds
"""
if not self.isTableLine(line):
return
if self.inTable:
allmatches = misc.numericExpressionOrInf.findall(line)
if len(allmatches) == 0:
return
pointInTime = allmatches[0]
pb = allmatches[4]
db = allmatches[5]
self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)
self.addHistoryData(Key.DualBoundHistory, pointInTime, db)
def isTableLine(self, line):
if self.primalboundhistory_exp.match(line):
self.inTable = True
return False
elif self.inTable and self.endtable.match(line):
self.inTable = False
return False
return self.inTable
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MIPCLSolver(Solver):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, **kw):
super(MIPCLSolver, self).__init__(**kw)
def extracHistory(self, line: str):
""" Extract the sequence of primal bounds
"""
if not self.isTableLine(line):
return
if self.inTable:
allmatches = misc.numericExpressionOrInf.findall(line)
if len(allmatches) == 0:
return
pointInTime = allmatches[0]
pb = allmatches[4]
db = allmatches[5]
self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)
self.addHistoryData(Key.DualBoundHistory, pointInTime, db)
def isTableLine(self, line):
if self.primalboundhistory_exp.match(line):
self.inTable = True
return False
elif self.inTable and self.endtable.match(line):
self.inTable = False
return False
return self.inTable
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MIPCLSolver(Solver):
solverId = 'MIPCL'
recognition_expr = re.compile('Reading data')
primalbound_expr = re.compile('Objective value: (\\S*)')
dualbound_expr = re.compile(
'^(?:\\s*lower-bound: |Objective value: )(\\S+)')
solvingtime_expr = re.compile('Solution time: (\\S*)')
version_expr = re.compile('MIPCL version (\\S*)')
solverstatusmap = {'Objective value: (\\S*) - optimality proven': Key.
SolverStatusCodes.Optimal, 'This problem is infeasible': Key.
SolverStatusCodes.Infeasible, 'Time limit reached': Key.
SolverStatusCodes.TimeLimit}
inTable = False
primalboundhistory_exp = re.compile(
'^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%'
)
endtable = re.compile('^===========================================')
def __init__(self, **kw):
super(MIPCLSolver, self).__init__(**kw)
def extracHistory(self, line: str):
""" Extract the sequence of primal bounds
"""
if not self.isTableLine(line):
return
if self.inTable:
allmatches = misc.numericExpressionOrInf.findall(line)
if len(allmatches) == 0:
return
pointInTime = allmatches[0]
pb = allmatches[4]
db = allmatches[5]
self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)
self.addHistoryData(Key.DualBoundHistory, pointInTime, db)
def isTableLine(self, line):
if self.primalboundhistory_exp.match(line):
self.inTable = True
return False
elif self.inTable and self.endtable.match(line):
self.inTable = False
return False
return self.inTable
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from ipet.parsing.Solver import Solver
import re
from ipet import Key
from ipet import misc
class MIPCLSolver(Solver):
solverId = 'MIPCL'
recognition_expr = re.compile('Reading data')
primalbound_expr = re.compile('Objective value: (\\S*)')
dualbound_expr = re.compile(
'^(?:\\s*lower-bound: |Objective value: )(\\S+)')
solvingtime_expr = re.compile('Solution time: (\\S*)')
version_expr = re.compile('MIPCL version (\\S*)')
solverstatusmap = {'Objective value: (\\S*) - optimality proven': Key.
SolverStatusCodes.Optimal, 'This problem is infeasible': Key.
SolverStatusCodes.Infeasible, 'Time limit reached': Key.
SolverStatusCodes.TimeLimit}
inTable = False
primalboundhistory_exp = re.compile(
'^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%'
)
endtable = re.compile('^===========================================')
def __init__(self, **kw):
super(MIPCLSolver, self).__init__(**kw)
def extracHistory(self, line: str):
""" Extract the sequence of primal bounds
"""
if not self.isTableLine(line):
return
if self.inTable:
allmatches = misc.numericExpressionOrInf.findall(line)
if len(allmatches) == 0:
return
pointInTime = allmatches[0]
pb = allmatches[4]
db = allmatches[5]
self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)
self.addHistoryData(Key.DualBoundHistory, pointInTime, db)
def isTableLine(self, line):
if self.primalboundhistory_exp.match(line):
self.inTable = True
return False
elif self.inTable and self.endtable.match(line):
self.inTable = False
return False
return self.inTable
<|reserved_special_token_1|>
"""
Created on Apr 27, 2017
@author: Franziska Schlösser
"""
from ipet.parsing.Solver import Solver
import re
from ipet import Key
from ipet import misc
class MIPCLSolver(Solver):
solverId = "MIPCL"
recognition_expr = re.compile("Reading data")
primalbound_expr = re.compile("Objective value: (\S*)")
dualbound_expr = re.compile("^(?:\s*lower-bound: |Objective value: )(\S+)")
solvingtime_expr = re.compile("Solution time: (\S*)")
version_expr = re.compile("MIPCL version (\S*)")
solverstatusmap = {"Objective value: (\S*) - optimality proven" : Key.SolverStatusCodes.Optimal,
"This problem is infeasible" : Key.SolverStatusCodes.Infeasible,
"Time limit reached" : Key.SolverStatusCodes.TimeLimit}
# variables needed for primal bound history
inTable = False
primalboundhistory_exp = re.compile("^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%")
endtable = re.compile('^===========================================')
def __init__(self, **kw):
super(MIPCLSolver, self).__init__(**kw)
def extracHistory(self, line : str):
""" Extract the sequence of primal bounds
"""
if not self.isTableLine(line):
return
# history reader should be in a table. check if a display char indicates a new primal bound
if self.inTable:
allmatches = misc.numericExpressionOrInf.findall(line)
if len(allmatches) == 0:
return
pointInTime = allmatches[0]
pb = allmatches[4]
db = allmatches[5]
# in the case of ugscip, we reacted on a disp char, so no problem at all.
self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)
self.addHistoryData(Key.DualBoundHistory, pointInTime, db)
def isTableLine(self, line):
if self.primalboundhistory_exp.match(line):
self.inTable = True
return False
elif self.inTable and self.endtable.match(line):
self.inTable = False
return False
return self.inTable
|
flexible
|
{
"blob_id": "191154c896fe441519ad4f343c6d92d6304fb3db",
"index": 8187,
"step-1": "<mask token>\n\n\nclass MIPCLSolver(Solver):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def extracHistory(self, line: str):\n \"\"\" Extract the sequence of primal bounds \n\t\t\"\"\"\n if not self.isTableLine(line):\n return\n if self.inTable:\n allmatches = misc.numericExpressionOrInf.findall(line)\n if len(allmatches) == 0:\n return\n pointInTime = allmatches[0]\n pb = allmatches[4]\n db = allmatches[5]\n self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)\n self.addHistoryData(Key.DualBoundHistory, pointInTime, db)\n\n def isTableLine(self, line):\n if self.primalboundhistory_exp.match(line):\n self.inTable = True\n return False\n elif self.inTable and self.endtable.match(line):\n self.inTable = False\n return False\n return self.inTable\n",
"step-2": "<mask token>\n\n\nclass MIPCLSolver(Solver):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, **kw):\n super(MIPCLSolver, self).__init__(**kw)\n\n def extracHistory(self, line: str):\n \"\"\" Extract the sequence of primal bounds \n\t\t\"\"\"\n if not self.isTableLine(line):\n return\n if self.inTable:\n allmatches = misc.numericExpressionOrInf.findall(line)\n if len(allmatches) == 0:\n return\n pointInTime = allmatches[0]\n pb = allmatches[4]\n db = allmatches[5]\n self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)\n self.addHistoryData(Key.DualBoundHistory, pointInTime, db)\n\n def isTableLine(self, line):\n if self.primalboundhistory_exp.match(line):\n self.inTable = True\n return False\n elif self.inTable and self.endtable.match(line):\n self.inTable = False\n return False\n return self.inTable\n",
"step-3": "<mask token>\n\n\nclass MIPCLSolver(Solver):\n solverId = 'MIPCL'\n recognition_expr = re.compile('Reading data')\n primalbound_expr = re.compile('Objective value: (\\\\S*)')\n dualbound_expr = re.compile(\n '^(?:\\\\s*lower-bound: |Objective value: )(\\\\S+)')\n solvingtime_expr = re.compile('Solution time: (\\\\S*)')\n version_expr = re.compile('MIPCL version (\\\\S*)')\n solverstatusmap = {'Objective value: (\\\\S*) - optimality proven': Key.\n SolverStatusCodes.Optimal, 'This problem is infeasible': Key.\n SolverStatusCodes.Infeasible, 'Time limit reached': Key.\n SolverStatusCodes.TimeLimit}\n inTable = False\n primalboundhistory_exp = re.compile(\n '^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%'\n )\n endtable = re.compile('^===========================================')\n\n def __init__(self, **kw):\n super(MIPCLSolver, self).__init__(**kw)\n\n def extracHistory(self, line: str):\n \"\"\" Extract the sequence of primal bounds \n\t\t\"\"\"\n if not self.isTableLine(line):\n return\n if self.inTable:\n allmatches = misc.numericExpressionOrInf.findall(line)\n if len(allmatches) == 0:\n return\n pointInTime = allmatches[0]\n pb = allmatches[4]\n db = allmatches[5]\n self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)\n self.addHistoryData(Key.DualBoundHistory, pointInTime, db)\n\n def isTableLine(self, line):\n if self.primalboundhistory_exp.match(line):\n self.inTable = True\n return False\n elif self.inTable and self.endtable.match(line):\n self.inTable = False\n return False\n return self.inTable\n",
"step-4": "<mask token>\nfrom ipet.parsing.Solver import Solver\nimport re\nfrom ipet import Key\nfrom ipet import misc\n\n\nclass MIPCLSolver(Solver):\n solverId = 'MIPCL'\n recognition_expr = re.compile('Reading data')\n primalbound_expr = re.compile('Objective value: (\\\\S*)')\n dualbound_expr = re.compile(\n '^(?:\\\\s*lower-bound: |Objective value: )(\\\\S+)')\n solvingtime_expr = re.compile('Solution time: (\\\\S*)')\n version_expr = re.compile('MIPCL version (\\\\S*)')\n solverstatusmap = {'Objective value: (\\\\S*) - optimality proven': Key.\n SolverStatusCodes.Optimal, 'This problem is infeasible': Key.\n SolverStatusCodes.Infeasible, 'Time limit reached': Key.\n SolverStatusCodes.TimeLimit}\n inTable = False\n primalboundhistory_exp = re.compile(\n '^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%'\n )\n endtable = re.compile('^===========================================')\n\n def __init__(self, **kw):\n super(MIPCLSolver, self).__init__(**kw)\n\n def extracHistory(self, line: str):\n \"\"\" Extract the sequence of primal bounds \n\t\t\"\"\"\n if not self.isTableLine(line):\n return\n if self.inTable:\n allmatches = misc.numericExpressionOrInf.findall(line)\n if len(allmatches) == 0:\n return\n pointInTime = allmatches[0]\n pb = allmatches[4]\n db = allmatches[5]\n self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)\n self.addHistoryData(Key.DualBoundHistory, pointInTime, db)\n\n def isTableLine(self, line):\n if self.primalboundhistory_exp.match(line):\n self.inTable = True\n return False\n elif self.inTable and self.endtable.match(line):\n self.inTable = False\n return False\n return self.inTable\n",
"step-5": "\"\"\"\nCreated on Apr 27, 2017\n\n@author: Franziska Schlösser\n\"\"\"\n\nfrom ipet.parsing.Solver import Solver\nimport re\nfrom ipet import Key\nfrom ipet import misc\n\nclass MIPCLSolver(Solver):\n\tsolverId = \"MIPCL\"\n\trecognition_expr = re.compile(\"Reading data\")\n\tprimalbound_expr = re.compile(\"Objective value: (\\S*)\")\n\tdualbound_expr = re.compile(\"^(?:\\s*lower-bound: |Objective value: )(\\S+)\")\n\tsolvingtime_expr = re.compile(\"Solution time: (\\S*)\")\n\tversion_expr = re.compile(\"MIPCL version (\\S*)\")\n\n\tsolverstatusmap = {\"Objective value: (\\S*) - optimality proven\" : Key.SolverStatusCodes.Optimal,\n\t\t\t\"This problem is infeasible\" : Key.SolverStatusCodes.Infeasible,\n\t\t\t\"Time limit reached\" : Key.SolverStatusCodes.TimeLimit}\n\t\n\t# variables needed for primal bound history\n\tinTable = False\n\tprimalboundhistory_exp = re.compile(\"^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%\")\n\tendtable = re.compile('^===========================================')\n\n\tdef __init__(self, **kw):\n\t\tsuper(MIPCLSolver, self).__init__(**kw)\n\n\tdef extracHistory(self, line : str):\n\t\t\"\"\" Extract the sequence of primal bounds \n\t\t\"\"\"\n\t\tif not self.isTableLine(line):\n\t\t\treturn \n\t\t\n\t\t# history reader should be in a table. check if a display char indicates a new primal bound\n\t\tif self.inTable:\n\t\t\tallmatches = misc.numericExpressionOrInf.findall(line)\n\t\t\tif len(allmatches) == 0:\n\t\t\t\treturn\n\n\t\t\tpointInTime = allmatches[0]\n\t\t\tpb = allmatches[4]\n\t\t\tdb = allmatches[5]\n\t\t\t# in the case of ugscip, we reacted on a disp char, so no problem at all.\n\t\t\tself.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)\n\t\t\tself.addHistoryData(Key.DualBoundHistory, pointInTime, db)\n\t\n\tdef isTableLine(self, line):\n\t\tif self.primalboundhistory_exp.match(line):\n\t\t\tself.inTable = True\n\t\t\treturn False\n\t\telif self.inTable and self.endtable.match(line):\n\t\t\tself.inTable = False\n\t\t\treturn False\n\t\treturn self.inTable\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
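The core of this row is the stateful table detector: the header regex switches the parser into the table, the separator line switches it out, and rows in between are mined for time, primal and dual bounds. A standalone sketch of that logic with made-up log lines (the real MIPCL output format is not shown beyond the regexes):

import re

header = re.compile(r'^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%')
footer = re.compile(r'^===========================================')

log = [
    ' Time Nodes Leaves Sols Best Solution Lower Bound Gap%',  # opens the table
    '0.1 17 9 1 42.5 40.0 5.9',                                # fabricated history row
    '===========================================',             # closes the table
]
in_table = False
for line in log:
    if header.match(line):
        in_table = True
    elif in_table and footer.match(line):
        in_table = False
    elif in_table:
        fields = line.split()
        # extracHistory reads matches 0, 4 and 5: time, primal bound, dual bound
        print('time=%s primal=%s dual=%s' % (fields[0], fields[4], fields[5]))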
import time
import random
import math
people = [('Seymour', 'BOS'),
('Franny', 'DAL'),
('Zooey', 'CAK'),
('Walt', 'MIA'),
('Buddy', 'ORD'),
('Les', 'OMA')]
destination = 'LGA'
flights = dict()
for line in file('schedule.txt'):
origin, dest, depart, arrive, price = line.strip().split(',')
flights.setdefault((origin, dest), [])
flights[(origin, dest)].append((depart, arrive, int(price)))
def getMinutes(t):
x = time.strptime(t, '%H:%M')
return x[3] * 60 + x[4]
def printSchedule(r):
for d in range(len(r) / 2):
name = people[d][0]
origin = people[d][1]
out = flights[(origin, destination)][r[2 * d]]
ret = flights[(origin, destination)][r[2 * d + 1]]
print "%10s%10s %5s-%5s $%3s %5s-%5s $%3s" % (name, origin,
out[0], out[1], out[2],
ret[0], ret[1], ret[2])
def scheduleCost(sol):
totalPrice = 0
totalWait = 0
latestArrival = 0
earliestDepart = 24 * 60
for d in range(len(sol) / 2):
origin = people[d][1]
out = flights[(origin, destination)][int(sol[2 * d])]
ret = flights[(origin, destination)][int(sol[2 * d + 1])]
totalPrice += out[2] + ret[2]
if latestArrival < getMinutes(out[1]): latestArrival = getMinutes(out[1])
if earliestDepart > getMinutes(ret[0]): earliestDepart = getMinutes(ret[0])
for d in range(len(sol) / 2):
origin = people[d][1]
out = flights[(origin, destination)][int(sol[2 * d])]
ret = flights[(origin, destination)][int(sol[2 * d + 1])]
totalWait += latestArrival - getMinutes(out[1])
totalWait += getMinutes(ret[0]) - earliestDepart
if latestArrival > earliestDepart: totalPrice += 50
return totalWait + totalPrice
def randomOptimize(domain, costf = scheduleCost):
best = 999999999999
bestr = None
for i in range(1000):
r = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
cost = costf(r)
if cost < best:
best = cost
bestr = r
return r
def hillClimb(domain, costf = scheduleCost):
sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
while 1:
neighbors = list()
for j in range(len(domain)):
if sol[j] > domain[j][0]:
neighbors.append(sol[0:j] + [sol[j] - 1] + sol[j + 1 :])
if sol[j] < domain[j][1]:
neighbors.append(sol[0:j] + [sol[j] + 1] + sol[j + 1 :])
current = costf(sol)
best = current
for j in neighbors:
cost = costf(j)
if cost < best:
best = cost
sol = j
if best == current:
break
return sol
def annealingOptimize(domain, costf = scheduleCost, T = 10000.0, cool = 0.95, step = 1):
sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
while T > 0.1:
i = random.randint(0, len(domain) - 1)
dir = random.randint(-step, step)
vec = sol[:]
vec[i] += dir
if vec[i] < domain[i][0]: vec[i] = domain[i][0]
elif vec[i] > domain[i][1]: vec[i] = domain[i][1]
ca = costf(sol)
cb = costf(vec)
if cb < ca or random.random() < pow(math.e, -(cb - ca) / T):
sol = vec
T = T * cool
return sol
def geneticOptimize(domain, costf = scheduleCost, popSize = 50, step = 1,
mutProb = 0.2, elite = 0.2, maxIter = 100):
def mutate(vec):
i = random.randint(0, len(domain) - 1)
if random.random < 0.5 and vec[i] > domain[i][0]:
return vec[0 : i] + [vec[i] - step] + vec[i + 1 :]
elif vec[i] < domain[i][1]:
return vec[0 : i] + [vec[i] + step] + vec[i + 1 :]
def crossOver(r1, r2):
i = random.randint(1, len(domain) - 2)
return r1[0 : i] + r2[i :]
pop = list()
for i in range(popSize):
vec = [random.randint(domain[i][0], domain[i][1])
for i in range(len(domain))]
pop.append(vec)
topElite = int(elite * popSize)
for i in range(maxIter):
scores = [(costf(v), v) for v in pop if v != None]
scores.sort()
ranked = [v for (s, v) in scores]
pop = ranked[0 : topElite]
while len(pop) < popSize:
if random.random() < mutProb:
pop.append(mutate(ranked[random.randint(0, topElite)]))
else:
c1 = random.randint(0, topElite)
c2 = random.randint(0, topElite)
pop.append(crossOver(ranked[c1], ranked[c2]))
print scores[0][0]
return scores[0][1]
|
normal
|
{
"blob_id": "bd5f298027f82edf5451f5297d577005674de4c3",
"index": 3577,
"step-1": "import time\nimport random\nimport math\n\npeople = [('Seymour', 'BOS'),\n ('Franny', 'DAL'),\n ('Zooey', 'CAK'),\n ('Walt', 'MIA'),\n ('Buddy', 'ORD'),\n ('Les', 'OMA')]\n\ndestination = 'LGA'\n\nflights = dict()\n\nfor line in file('schedule.txt'):\n origin, dest, depart, arrive, price = line.strip().split(',')\n flights.setdefault((origin, dest), [])\n flights[(origin, dest)].append((depart, arrive, int(price)))\n\n\ndef getMinutes(t):\n x = time.strptime(t, '%H:%M')\n return x[3] * 60 + x[4]\n\ndef printSchedule(r):\n for d in range(len(r) / 2):\n name = people[d][0]\n origin = people[d][1]\n out = flights[(origin, destination)][r[2 * d]]\n ret = flights[(origin, destination)][r[2 * d + 1]]\n print \"%10s%10s %5s-%5s $%3s %5s-%5s $%3s\" % (name, origin,\n out[0], out[1], out[2],\n ret[0], ret[1], ret[2])\ndef scheduleCost(sol):\n totalPrice = 0\n totalWait = 0\n latestArrival = 0\n earliestDepart = 24 * 60\n\n for d in range(len(sol) / 2):\n origin = people[d][1]\n out = flights[(origin, destination)][int(sol[2 * d])]\n ret = flights[(origin, destination)][int(sol[2 * d + 1])]\n\n totalPrice += out[2] + ret[2]\n\n if latestArrival < getMinutes(out[1]): latestArrival = getMinutes(out[1])\n if earliestDepart > getMinutes(ret[0]): earliestDepart = getMinutes(ret[0])\n\n for d in range(len(sol) / 2):\n origin = people[d][1]\n out = flights[(origin, destination)][int(sol[2 * d])]\n ret = flights[(origin, destination)][int(sol[2 * d + 1])]\n totalWait += latestArrival - getMinutes(out[1])\n totalWait += getMinutes(ret[0]) - earliestDepart\n\n if latestArrival > earliestDepart: totalPrice += 50\n\n return totalWait + totalPrice\n\ndef randomOptimize(domain, costf = scheduleCost):\n best = 999999999999\n bestr = None\n\n for i in range(1000):\n r = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]\n cost = costf(r)\n if cost < best:\n best = cost\n bestr = r\n\n return r\n\ndef hillClimb(domain, costf = scheduleCost):\n sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]\n\n while 1:\n neighbors = list()\n for j in range(len(domain)):\n if sol[j] > domain[j][0]:\n neighbors.append(sol[0:j] + [sol[j] - 1] + sol[j + 1 :])\n if sol[j] < domain[j][1]:\n neighbors.append(sol[0:j] + [sol[j] + 1] + sol[j + 1 :])\n\n current = costf(sol)\n best = current\n for j in neighbors:\n cost = costf(j)\n if cost < best:\n best = cost\n sol = j\n\n if best == current:\n break\n\n return sol\n\ndef annealingOptimize(domain, costf = scheduleCost, T = 10000.0, cool = 0.95, step = 1):\n sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]\n\n while T > 0.1:\n i = random.randint(0, len(domain) - 1)\n dir = random.randint(-step, step)\n vec = sol[:]\n vec[i] += dir\n if vec[i] < domain[i][0]: vec[i] = domain[i][0]\n elif vec[i] > domain[i][1]: vec[i] = domain[i][1]\n\n ca = costf(sol)\n cb = costf(vec)\n\n if cb < ca or random.random() < pow(math.e, -(cb - ca) / T):\n sol = vec\n\n T = T * cool\n\n return sol\n\ndef geneticOptimize(domain, costf = scheduleCost, popSize = 50, step = 1,\n mutProb = 0.2, elite = 0.2, maxIter = 100):\n def mutate(vec):\n i = random.randint(0, len(domain) - 1)\n if random.random < 0.5 and vec[i] > domain[i][0]:\n return vec[0 : i] + [vec[i] - step] + vec[i + 1 :]\n elif vec[i] < domain[i][1]:\n return vec[0 : i] + [vec[i] + step] + vec[i + 1 :]\n\n def crossOver(r1, r2):\n i = random.randint(1, len(domain) - 2)\n return r1[0 : i] + r2[i :]\n\n pop = list()\n for i in range(popSize):\n vec = 
[random.randint(domain[i][0], domain[i][1])\n for i in range(len(domain))]\n pop.append(vec)\n\n topElite = int(elite * popSize)\n for i in range(maxIter):\n scores = [(costf(v), v) for v in pop if v != None]\n scores.sort()\n ranked = [v for (s, v) in scores]\n pop = ranked[0 : topElite]\n while len(pop) < popSize:\n if random.random() < mutProb:\n pop.append(mutate(ranked[random.randint(0, topElite)]))\n else:\n c1 = random.randint(0, topElite)\n c2 = random.randint(0, topElite)\n pop.append(crossOver(ranked[c1], ranked[c2]))\n print scores[0][0]\n\n return scores[0][1]\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
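This row is Python 2 throughout (print statements, file()). Two quirks worth noting: randomOptimize returns the last candidate r rather than the tracked best bestr, and mutate compares the function object random.random to 0.5 instead of calling it, so in Python 2 that branch is dead and mutate can return None, which the later `if v != None` filter absorbs. A hypothetical driver for the optimizers, assuming schedule.txt offers ten flights per route so valid indices run 0..9:

# two flight indices (outbound, return) per person
domain = [(0, 9)] * (len(people) * 2)
best = annealingOptimize(domain, scheduleCost)
printSchedule(best)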
from activitystreams.core import Object
class Actor(Object):
"""Describes a generic actor."""
pass
class Application(Actor):
"""Describes a software application."""
pass
class Group(Actor):
"""Represents a formal or informal collective of Actors."""
pass
class Organization(Actor):
"""Represents an organization."""
pass
class Person(Actor):
"""Represents an individual person."""
pass
class Service(Actor):
"""Represents a service of any kind."""
pass
|
normal
|
{
"blob_id": "b92f24cddae7b392af2417b39bb4f58e3f661cc6",
"index": 2785,
"step-1": "<mask token>\n\n\nclass Group(Actor):\n \"\"\"Represents a formal or informal collective of Actors.\"\"\"\n pass\n\n\nclass Organization(Actor):\n \"\"\"Represents an organization.\"\"\"\n pass\n\n\nclass Person(Actor):\n \"\"\"Represents an individual person.\"\"\"\n pass\n\n\nclass Service(Actor):\n \"\"\"Represents a service of any kind.\"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass Application(Actor):\n \"\"\"Describes a software application.\"\"\"\n pass\n\n\nclass Group(Actor):\n \"\"\"Represents a formal or informal collective of Actors.\"\"\"\n pass\n\n\nclass Organization(Actor):\n \"\"\"Represents an organization.\"\"\"\n pass\n\n\nclass Person(Actor):\n \"\"\"Represents an individual person.\"\"\"\n pass\n\n\nclass Service(Actor):\n \"\"\"Represents a service of any kind.\"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass Actor(Object):\n <mask token>\n pass\n\n\nclass Application(Actor):\n \"\"\"Describes a software application.\"\"\"\n pass\n\n\nclass Group(Actor):\n \"\"\"Represents a formal or informal collective of Actors.\"\"\"\n pass\n\n\nclass Organization(Actor):\n \"\"\"Represents an organization.\"\"\"\n pass\n\n\nclass Person(Actor):\n \"\"\"Represents an individual person.\"\"\"\n pass\n\n\nclass Service(Actor):\n \"\"\"Represents a service of any kind.\"\"\"\n pass\n",
"step-4": "<mask token>\n\n\nclass Actor(Object):\n \"\"\"Describes a generic actor.\"\"\"\n pass\n\n\nclass Application(Actor):\n \"\"\"Describes a software application.\"\"\"\n pass\n\n\nclass Group(Actor):\n \"\"\"Represents a formal or informal collective of Actors.\"\"\"\n pass\n\n\nclass Organization(Actor):\n \"\"\"Represents an organization.\"\"\"\n pass\n\n\nclass Person(Actor):\n \"\"\"Represents an individual person.\"\"\"\n pass\n\n\nclass Service(Actor):\n \"\"\"Represents a service of any kind.\"\"\"\n pass\n",
"step-5": "from activitystreams.core import Object\n\n\nclass Actor(Object):\n \"\"\"Describes a generic actor.\"\"\"\n pass\n\n\nclass Application(Actor):\n \"\"\"Describes a software application.\"\"\"\n pass\n\n\nclass Group(Actor):\n \"\"\"Represents a formal or informal collective of Actors.\"\"\"\n pass\n\n\nclass Organization(Actor):\n \"\"\"Represents an organization.\"\"\"\n pass\n\n\nclass Person(Actor):\n \"\"\"Represents an individual person.\"\"\"\n pass\n\n\nclass Service(Actor):\n \"\"\"Represents a service of any kind.\"\"\"\n pass\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
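Object comes from activitystreams.core, which this row does not include. A minimal stand-in that makes the module importable on its own could look like the sketch below; this is an assumption for illustration, not the real activitystreams API:

class Object(object):
    """Hypothetical base: store arbitrary ActivityStreams properties as attributes."""
    def __init__(self, **properties):
        for key, value in properties.items():
            setattr(self, key, value)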
import os
from PIL import Image
import cv2
import shutil
root = './train'
save_path = './thumbnail'
for r, d, files in os.walk(root):
if files != []:
for i in files:
fp = os.path.join(r, i)
label = i.split('_')[0]
dst = os.path.join(save_path, label)
if not os.path.exists(dst):
os.makedirs(dst)
img = Image.open(fp).convert('RGB')
w, h = img.size
if max(w, h) > 256:
img.thumbnail((256, 256), Image.ANTIALIAS)
img.save(os.path.join(dst, i), quality=95, subsampling=0)
else:
shutil.copy(fp, os.path.join(dst, i))
#The original images vary in size and are mostly high-resolution, so resizing at training time is slow; we therefore resize to a small size up front and save the results.
# Image.thumbnail() also acts as a filter: if the width/height are already within range it does not resize, otherwise it scales down proportionally.
#The dataset shrinks from 114 GB before processing to 86 GB after. On 2x Tesla V100 32GB, training the baseline took about 2400 s (40 min) per epoch before processing
# and about 1400 s (23 min) after, a large cut in training time with presumably no effect on accuracy; shrinking the 256 threshold further should be faster still, since the training input size is 224x224.
|
normal
|
{
"blob_id": "cc19ff829cc4a11c3dc873353fa2194ec9a87718",
"index": 8584,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor r, d, files in os.walk(root):\n if files != []:\n for i in files:\n fp = os.path.join(r, i)\n label = i.split('_')[0]\n dst = os.path.join(save_path, label)\n if not os.path.exists(dst):\n os.makedirs(dst)\n img = Image.open(fp).convert('RGB')\n w, h = img.size\n if max(w, h) > 256:\n img.thumbnail((256, 256), Image.ANTIALIAS)\n img.save(os.path.join(dst, i), quality=95, subsampling=0)\n else:\n shutil.copy(fp, os.path.join(dst, i))\n",
"step-3": "<mask token>\nroot = './train'\nsave_path = './thumbnail'\nfor r, d, files in os.walk(root):\n if files != []:\n for i in files:\n fp = os.path.join(r, i)\n label = i.split('_')[0]\n dst = os.path.join(save_path, label)\n if not os.path.exists(dst):\n os.makedirs(dst)\n img = Image.open(fp).convert('RGB')\n w, h = img.size\n if max(w, h) > 256:\n img.thumbnail((256, 256), Image.ANTIALIAS)\n img.save(os.path.join(dst, i), quality=95, subsampling=0)\n else:\n shutil.copy(fp, os.path.join(dst, i))\n",
"step-4": "import os\nfrom PIL import Image\nimport cv2\nimport shutil\nroot = './train'\nsave_path = './thumbnail'\nfor r, d, files in os.walk(root):\n if files != []:\n for i in files:\n fp = os.path.join(r, i)\n label = i.split('_')[0]\n dst = os.path.join(save_path, label)\n if not os.path.exists(dst):\n os.makedirs(dst)\n img = Image.open(fp).convert('RGB')\n w, h = img.size\n if max(w, h) > 256:\n img.thumbnail((256, 256), Image.ANTIALIAS)\n img.save(os.path.join(dst, i), quality=95, subsampling=0)\n else:\n shutil.copy(fp, os.path.join(dst, i))\n",
"step-5": "import os\nfrom PIL import Image\nimport cv2\nimport shutil\n\nroot = './train'\nsave_path = './thumbnail'\nfor r, d, files in os.walk(root):\n if files != []:\n for i in files:\n fp = os.path.join(r, i)\n label = i.split('_')[0]\n dst = os.path.join(save_path, label)\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n img = Image.open(fp).convert('RGB')\n w, h = img.size\n if max(w, h) > 256:\n img.thumbnail((256, 256), Image.ANTIALIAS)\n img.save(os.path.join(dst, i), quality=95, subsampling=0)\n else:\n shutil.copy(fp, os.path.join(dst, i))\n\n\n\n#原数据由于尺寸不一,多数是高清图片,训练时resize会很耗时,因此先resize到一个小尺寸保存起来。\n# Image.thumbnail()可以起到过滤的作用,如果hw在范围内就不会resize,超过就会按比例放缩。\n#处理前数据集大小为114G,处理后为86G。在 Tesla V100 32GB*2 硬件环境下,训练Baseline,处理前训练时间一个epoch约为2400s(40min),\n# 处理后一个epoch约1400s(23min),极大缩小了训练时间,精度应该没有什么影响,调小判别尺寸应该还能更快,毕竟训练数据尺寸是224x224。",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
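One portability note for this row: Image.ANTIALIAS was removed in Pillow 10; since Pillow 9.1 the same filter is spelled Image.Resampling.LANCZOS. A version-tolerant variant of the resize step (the file name is a placeholder):

from PIL import Image

try:
    RESAMPLE = Image.Resampling.LANCZOS  # Pillow >= 9.1
except AttributeError:
    RESAMPLE = Image.ANTIALIAS           # older Pillow, where ANTIALIAS == LANCZOS

img = Image.open('example.jpg').convert('RGB')
img.thumbnail((256, 256), RESAMPLE)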
from datetime import timedelta, datetime
import glob
import json
import os
import re
import pickle
import os,time
import pandas as pd
import numpy as np
from collections import Counter
from sentencepiece import SentencePieceTrainer
from sentencepiece import SentencePieceProcessor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import vstack
from scipy import sparse
import scipy.sparse as spr
from scipy.sparse import vstack
from scipy import sparse
from util import write_json,makeSentencepieceModel
from sklearn.feature_extraction.text import CountVectorizer
from tqdm import tqdm_notebook
from sklearn.neighbors import NearestNeighbors
from Dataset import Dataset
import pre_tag,word2vec_for_tag
def song_inference():
sp_total_model_path = "sp_total"
train = pd.read_json('./dataset/train.json', typ = 'frame',encoding='utf-8')
song = pd.read_json('./dataset/song_meta.json', typ = 'frame',encoding='utf-8')
plylst_tag = train['tags']
tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])
tag_dict = {x: tag_counter[x] for x in tag_counter}
tag_id_tid = dict()
tag_tid_id = dict()
for i, t in enumerate(tag_dict):
tag_id_tid[t] = i
tag_tid_id[i] = t
n_tags = len(tag_dict)
plylst_song = train['songs']
song_dict = {x: x for x in song['id']}
n_songs = len(song_dict)
train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])
    # collect detailed song genres (song_gn_dtl_gnr_basket) for each playlist
song_cate = []
for i in range(len(train)):
gnr = []
songs = train.iloc[i,3]
for j in songs:
for k in song.loc[j,'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
train['plylst_genre'] = song_cate
plylst_genre = train['plylst_genre']
genre_counter = Counter([gen for genre in plylst_genre for gen in genre])
genre_dict = {x: genre_counter[x] for x in genre_counter}
genre_id_tid = dict()
genre_tid_id = dict()
for i, t in enumerate(genre_dict):
genre_id_tid[t] = i
genre_tid_id[i] = t
n_genre = len(genre_dict)
train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
gnr_array = np.zeros((len(train),n_genre))
for i,index in enumerate(train.index):
if i%10000 == 0:
print(i)
counter = Counter(train.loc[index]['plylst_genre_id'])
for (k,c) in counter.items():
gnr_array[i][k] = c
gnr_array.shape
song['issue_date'] = song['issue_date'].astype('str').map(lambda x : x[:6])
plylst_use = train[['plylst_title','updt_date','tags_id','songs']]
plylst_use.loc[:,'num_songs'] = plylst_use['songs'].map(len)
plylst_use.loc[:,'num_tags'] = plylst_use['tags_id'].map(len)
plylst_train = plylst_use
n_train = len(plylst_train)
    row = np.repeat(range(n_train), plylst_train['num_songs'])  # repeat each playlist index once per song it contains
    col = [song for songs in plylst_train['songs'] for song in songs]  # flatten all song ids into one column index
    dat = np.repeat(1, plylst_train['num_songs'].sum())  # a 1 for every (playlist, song) occurrence
    train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_songs))  # build the csr_matrix
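    # e.g. spr.csr_matrix(([1, 1, 1], ([0, 0, 1], [5, 7, 5])), shape=(2, n_songs))
    # marks songs 5 and 7 in playlist 0 and song 5 in playlist 1; the same
    # (data, (row, col)) pattern builds the tag, val and test matrices below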
row = np.repeat(range(n_train), plylst_train['num_tags'])
col = [tag for tags in plylst_train['tags_id'] for tag in tags]
dat = np.repeat(1, plylst_train['num_tags'].sum())
train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_tags))
train_user_songs_A_T = train_user_songs_A.T.tocsr()
    train_user_songs_A_T  # rows are songs, columns are playlists
train_user_tags_A_T = train_user_tags_A.T.tocsr()
    train_user_tags_A_T  # rows are tags, columns are playlists
val = pd.read_json('./dataset/val.json', typ = 'frame',encoding='utf-8')
song_cate = []
for i in range(len(val)):
gnr = []
songs = val.iloc[i,3]
for j in songs:
for k in song.loc[j,'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
val['plylst_genre'] = song_cate
val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])
val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
val.loc[:,'num_songs'] = val['songs'].map(len)
val.loc[:,'num_tags'] = val['tags_id'].map(len)
# val_title = cv.transform(val['plylst_title']).toarray()
gnr_val = np.zeros((len(val),n_genre))
for i,index in enumerate(val.index):
if i%10000 == 0:
print(i)
counter = Counter(val.loc[index]['plylst_genre_id'])
for (k,c) in counter.items():
gnr_val[i][k] = c
gnr_val.shape
n_val = len(val)
    row = np.repeat(range(n_val), val['num_songs'])  # repeat each playlist index once per song it contains
    col = [song for songs in val['songs'] for song in songs]  # flatten all song ids into one column index
    dat = np.repeat(1, val['num_songs'].sum())  # a 1 for every (playlist, song) occurrence
    val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs))  # build the csr_matrix
row = np.repeat(range(n_val), val['num_tags'])
col = [tag for tags in val['tags_id'] for tag in tags]
dat = np.repeat(1, val['num_tags'].sum())
val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))
val_user_songs_A_T = val_user_songs_A.T.tocsr()
val_user_tags_A_T = val_user_tags_A.T.tocsr()
test = pd.read_json('./dataset/test.json', typ = 'frame',encoding='utf-8')
song_cate = []
for i in range(len(test)):
gnr = []
songs = test.iloc[i,3]
for j in songs:
for k in song.loc[j,'song_gn_dtl_gnr_basket']:
gnr.append(k)
song_cate.append(gnr)
test['plylst_genre'] = song_cate
test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])
test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])
test.loc[:,'num_songs'] = test['songs'].map(len)
test.loc[:,'num_tags'] = test['tags_id'].map(len)
# test_title = cv.transform(test['plylst_title']).toarray()
gnr_test = np.zeros((len(test),n_genre))
for i,index in enumerate(test.index):
if i%10000 == 0:
print(i)
counter = Counter(test.loc[index]['plylst_genre_id'])
for (k,c) in counter.items():
gnr_test[i][k] = c
gnr_test.shape
n_test = len(test)
    row = np.repeat(range(n_test), test['num_songs'])  # repeat each playlist index once per song it contains
    col = [song for songs in test['songs'] for song in songs]  # flatten all song ids into one column index
    dat = np.repeat(1, test['num_songs'].sum())  # a 1 for every (playlist, song) occurrence
    test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_songs))  # build the csr_matrix
row = np.repeat(range(n_test), test['num_tags'])
col = [tag for tags in test['tags_id'] for tag in tags]
dat = np.repeat(1, test['num_tags'].sum())
test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags))
test_user_songs_A_T = test_user_songs_A.T.tocsr()
test_user_tags_A_T = test_user_tags_A.T.tocsr()
data_all = pd.concat([train,val,test])
data_all.index = range(len(data_all))
arts = song['artist_id_basket'].map(lambda x : x[0])
arts = pd.DataFrame(arts)
art_counts = arts['artist_id_basket'].value_counts().reset_index()
art_counts.columns = ['artist_id_basket','counts']
arts2 = pd.merge(arts,art_counts,how='left',on=['artist_id_basket'])
song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]
song_art = song_art[['artist_id_basket']]
    # artist main category: collect the lead artist of each song per playlist
ART_cate = []
for i in tqdm_notebook(range(len(data_all))):
ART = []
songs = data_all.loc[i,'songs']
for j in songs:
if j in song_art.index :
for k in song_art.loc[j,'artist_id_basket'] :
ART.append(k)
ART_cate.append(ART)
data_all['plylst_ARTIST'] = ART_cate
plylst_ARTIST = data_all['plylst_ARTIST']
ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in ARTIST])
ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}
ARTIST_id_tid = dict()
ARTIST_tid_id = dict()
for i, t in enumerate(ARTIST_dict):
ARTIST_id_tid[t] = i
ARTIST_tid_id[i] = t
n_ARTIST = len(ARTIST_dict)
data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x: [ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])
ART_data_all = np.zeros((len(data_all),n_ARTIST))
for i,index in enumerate(data_all.index):
if i%10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])
for (k,c) in counter.items():
ART_data_all[i][k] = c
ART_data_all.shape
ART_array = ART_data_all[:len(train)]
ART_val = ART_data_all[len(train):len(train)+len(val)]
ART_test = ART_data_all[len(train)+len(val):len(train)+len(val)+len(test)]
# ART_data_all = sparse.csr_matrix(ART_data_all)
del ART_data_all
ART_array = sparse.csr_matrix(ART_array)
ART_val = sparse.csr_matrix(ART_val)
ART_test = sparse.csr_matrix(ART_test)
    # collect song issue dates for each playlist
tim_cate = []
for i in tqdm_notebook(range(len(data_all))):
tim = []
songs = data_all.loc[i,'songs']
for j in songs:
tim.append(song.loc[j,'issue_date'])
tim_cate.append(tim)
data_all['plylst_times'] = tim_cate
plylst_times = data_all['plylst_times']
times_counter = Counter([tim for times in plylst_times for tim in times])
times_dict = {x: times_counter[x] for x in times_counter}
times_id_tid = dict()
times_tid_id = dict()
for i, t in enumerate(times_dict):
times_id_tid[t] = i
times_tid_id[i] = t
n_times = len(times_dict)
data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])
tim_data_all = np.zeros((len(data_all),n_times))
for i,index in enumerate(data_all.index):
if i%10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_times_id'])
for (k,c) in counter.items():
tim_data_all[i][k] = c
tim_array = tim_data_all[:len(train)]
tim_val = tim_data_all[len(train):len(train)+len(val)]
tim_test = tim_data_all[len(train)+len(val):len(train)+len(val)+len(test)]
# tim_data_all = sparse.csr_matrix(tim_data_all)
del tim_data_all
tim_array = sparse.csr_matrix(tim_array)
tim_val = sparse.csr_matrix(tim_val)
tim_test = sparse.csr_matrix(tim_test)
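# The per-month issue-date counts act as a rough era fingerprint for each playlist.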
# Top-level genre features (song_gn_gnr_basket)
GEN_cate = []
for i in tqdm_notebook(range(len(data_all))):
GEN = []
songs = data_all.loc[i,'songs']
for j in songs:
for k in song.loc[j,'song_gn_gnr_basket'] :
GEN.append(k)
GEN_cate.append(GEN)
data_all['plylst_GENRE'] = GEN_cate
plylst_GENRE = data_all['plylst_GENRE']
GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])
GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}
GENRE_id_tid = dict()
GENRE_tid_id = dict()
for i, t in enumerate(GENRE_dict):
GENRE_id_tid[t] = i
GENRE_tid_id[i] = t
n_GENRE = len(GENRE_dict)
data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])
GEN_data_all = np.zeros((len(data_all),n_GENRE))
for i,index in enumerate(data_all.index):
if i%10000 == 0:
print(i)
counter = Counter(data_all.loc[index]['plylst_GENRE_id'])
for (k,c) in counter.items():
GEN_data_all[i][k] = c
GEN_array = GEN_data_all[:len(train)]
GEN_val = GEN_data_all[len(train):len(train)+len(val)]
GEN_test = GEN_data_all[len(train)+len(val):len(train)+len(val)+len(test)]
# GEN_data_all = sparse.csr_matrix(GEN_data_all)
del GEN_data_all
GEN_array = sparse.csr_matrix(GEN_array)
GEN_val = sparse.csr_matrix(GEN_val)
GEN_test = sparse.csr_matrix(GEN_test)
content = data_all['plylst_title']
if "{}.model".format(sp_total_model_path) not in os.listdir():
makeSentencepieceModel(data_all,sp_total_model_path)
sp = SentencePieceProcessor()
sp.Load("{}.model".format(sp_total_model_path))
cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)
content = data_all['plylst_title']
tdm = cv.fit_transform(content)
title_tdm = tdm.toarray()
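# Titles become 3000-dimensional bags of subword counts, tokenized by the SentencePiece model loaded above.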
title_tr = title_tdm[:len(train)]
title_va = title_tdm[len(train):len(train)+len(val)]
title_ts = title_tdm[len(train)+len(val):len(train)+len(val)+len(test)]
title_gnr = np.concatenate((gnr_array,title_tr),axis=1)
val_title_gnr = np.concatenate((gnr_val,title_va),axis=1)
test_title_gnr = np.concatenate((gnr_test,title_ts),axis=1)
title_sp = sparse.csr_matrix(title_tdm)
title_gnr = sparse.csr_matrix(title_gnr)
val_title_gnr = sparse.csr_matrix(val_title_gnr)
test_title_gnr = sparse.csr_matrix(test_title_gnr)
title_gnr = vstack([title_gnr,val_title_gnr,test_title_gnr])
song_sp = vstack([train_user_songs_A,val_user_songs_A,test_user_songs_A])
tag_sp = vstack([train_user_tags_A,val_user_tags_A,test_user_tags_A])
times_sp = vstack([tim_array,tim_val,tim_test])
GEN_sp = vstack([GEN_array,GEN_val,GEN_test])
ART_sp = vstack([ART_array,ART_val,ART_test])
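# All stacked matrices share one row order: train rows first, then val, then test.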
# song_sp_T = song_sp.T.tocsr()
# tag_sp_T = tag_sp.T.tocsr()
model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)
model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)
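# Each view gets two models (k=25 and k=40); their candidate scores are averaged below, roughly trading the precision of the tighter neighborhood against the recall of the wider one.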
model_knn_song25.fit(song_sp)
model_knn_tag25.fit(tag_sp)
model_knn_title25.fit(title_sp)
model_knn_title_gnr25.fit(title_gnr)
model_knn_times25.fit(times_sp)
model_knn_GEN25.fit(GEN_sp)
model_knn_ART25.fit(ART_sp)
model_knn_song40.fit(song_sp)
model_knn_tag40.fit(tag_sp)
model_knn_title40.fit(title_sp)
model_knn_title_gnr40.fit(title_gnr)
model_knn_times40.fit(times_sp)
model_knn_GEN40.fit(GEN_sp)
model_knn_ART40.fit(ART_sp)
train.loc[:,'num_songs'] = train['songs'].map(len)
train.loc[:,'num_tags'] = train['tags_id'].map(len)
data_all = pd.concat([train,val,test])
data_all.index = range(len(data_all))
res = []
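# Inference: branch on which signals a test playlist provides (songs and tags, songs only, tags only, or just a title).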
for i in tqdm_notebook(range(len(test))):
data = test.iloc[i]
pid = i
if len(data['songs']) >= 2 and len(data['tags_id']) >=2 :
p = np.zeros((707989,1))
p[data['songs']] = 1
pp = np.zeros((n_tags,1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs']) # one row index per song of each of the 25 neighbor playlists
col = [song for songs in tra_song['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # 25-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
songs_already = data["songs"]
tags_already = data["tags_id"]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tag = cosine_similarity(tra_tag_sp,pp.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
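# Multiplying the per-view similarities keeps only neighbors that look similar on every view at once.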
testi = test_song * test_tag * test_title_genre * test_tim * test_GEN * test_ART
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
####### 40 ####################################################
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs']) # one row index per song of each of the 40 neighbor playlists
col = [song for songs in tra_song['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)) # 40-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]
row = np.repeat(range(40), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tag = cosine_similarity(tra_tag_sp,pp.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_tag * test_title_genre * test_tim * test_GEN * test_ART
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
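# Ensemble the k=25 and k=40 candidates: outer-join on song index and average the two scores.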
cand_all = pd.merge(cand1,cand2,how='outer',on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y'])/2
cand_song_idx = list(cand_all.sort_values(by=['pred'],ascending=False)[:100]['index'])
######tag######
cand_tag = tra_tag_sp_T.dot(testi) # same scoring procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['songs']) != 0:
p = np.zeros((707989,1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_song['num_songs']) # one row index per song of each of the 25 neighbor playlists
col = [song for songs in tra_song['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # 25-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
# tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]
row = np.repeat(range(25), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
songs_already = data["songs"]
tags_already = data["tags_id"]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song*test_title_genre*test_tim*test_GEN * test_ART
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
####### 40 ####################################################
tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]
row = np.repeat(range(40), tra_song['num_songs']) # one row index per song of each of the 40 neighbor playlists
col = [song for songs in tra_song['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)) # 40-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(40), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_title_genre * test_tim * test_GEN * test_ART
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # drop songs already in the playlist
cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()
cand_all = pd.merge(cand1,cand2,how='outer',on='index')
cand_all = cand_all.fillna(0)
cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y'])/2
cand_song_idx = list(cand_all.sort_values(by=['pred'],ascending=False)[:100]['index'])
#######tag########
cand_tag = tra_tag_sp_T.dot(testi) # same scoring procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['tags_id']) !=0:
p = np.zeros((n_tags,1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]
row = np.repeat(range(25), tra_tag['num_songs']) # one row index per song of each of the 25 neighbor playlists
col = [song for songs in tra_tag['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_tag['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # 25-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(25), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data["songs"]
tags_already = data["tags_id"]
testi = cosine_similarity(tra_tag_sp,p.T)
if len(data['plylst_title']) != 0 :
tra_title_gnr = title_tdm[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]]
testi_title = cosine_similarity(tra_title_gnr,title_ts[i:(i+1)])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop already-present songs and keep the top 100
cand_tag = tra_tag_sp_T.dot(testi) # same scoring procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res.append({
"id": test.loc[pid,'id'],
"songs": list(cand_song_idx),
"tags": rec_tag_idx
})
else :
cand_song = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10].index)
res.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
for i in range(len(res)):
if len(res[i]['songs']) != 100:
print('song list at index {} has the wrong length'.format(i))
if len(res[i]['tags']) != 10:
print('tag list at index {} has the wrong length'.format(i))
rec = []
for i in range(len(res)):
rec.append({
"id": res[i]['id'],
"songs": list(res[i]['songs']),
"tags": res[i]['tags']
})
result1 = pd.DataFrame(rec)
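# A few test playlists still end up with fewer than 100 songs; rerun them with wider k=50 neighborhoods. The five indices below were apparently found by inspecting result1's song-list lengths.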
model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)
model_knn_song.fit(song_sp)
model_knn_tag.fit(tag_sp)
model_knn_title.fit(title_sp)
model_knn_title_gnr.fit(title_gnr)
model_knn_times.fit(times_sp)
model_knn_GEN.fit(GEN_sp)
model_knn_ART.fit(ART_sp)
res2 = []
for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):
data = test.iloc[i]
pid = i
if len(data['songs']) != 0 and len(data['tags_id']) != 0:
p = np.zeros((707989,1))
p[data['songs']] = 1
pp = np.zeros((n_tags,1))
pp[data['tags_id']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs']) # one row index per song of each of the 50 neighbor playlists
col = [song for songs in tra_song['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # 50-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
songs_already = data["songs"]
tags_already = data["tags_id"]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tag = cosine_similarity(tra_tag_sp,pp.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song * test_tag * test_title_genre * test_GEN
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop already-present songs and keep the top 100
cand_tag = tra_tag_sp_T.dot(testi) # same scoring procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['songs']) != 0:
p = np.zeros((707989,1))
p[data['songs']] = 1
tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_song['num_songs']) # one row index per song of each of the 50 neighbor playlists
col = [song for songs in tra_song['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_song['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # 50-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_song['num_tags'])
col = [tag for tags in tra_song['tags_id'] for tag in tags]
dat = np.repeat(1, tra_song['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data["songs"]
tags_already = data["tags_id"]
tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:(i+1)])[1][0]]
tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:(i+1)])[1][0]]
tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(test_title_gnr[i:(i+1)])[1][0]]
test_song = cosine_similarity(tra_song_sp,p.T)
test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])
test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])
testi = test_song*test_title_genre*test_tim*test_GEN
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1] # take the 200 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop already-present songs and keep the top 100
cand_tag = tra_tag_sp_T.dot(testi) # same scoring procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
elif len(data['tags_id']) !=0:
p = np.zeros((n_tags,1))
p[data['tags_id']] = 1
tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]
row = np.repeat(range(50), tra_tag['num_songs']) # one row index per song of each of the 50 neighbor playlists
col = [song for songs in tra_tag['songs'] for song in songs] # flatten the neighbors' song ids
dat = np.repeat(1, tra_tag['num_songs'].sum()) # a 1 for every (neighbor, song) incidence
tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # 50-neighbors-by-songs CSR matrix
tra_song_sp_T = tra_song_sp.T.tocsr()
row = np.repeat(range(50), tra_tag['num_tags'])
col = [tag for tags in tra_tag['tags_id'] for tag in tags]
dat = np.repeat(1, tra_tag['num_tags'].sum())
tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))
tra_tag_sp_T = tra_tag_sp.T.tocsr()
songs_already = data["songs"]
tags_already = data["tags_id"]
testi = cosine_similarity(tra_tag_sp,p.T)
if len(data['plylst_title']) != 0 :
tra_title_gnr = title_tdm[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]]
testi_title = cosine_similarity(tra_title_gnr,title_ts[i:(i+1)])
testi = testi * testi_title
cand_song = tra_song_sp_T.dot(testi) # songs-by-neighbors matrix times neighbor similarities: songs shared by similar playlists score high
cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # take the 300 highest-scoring songs
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # drop already-present songs and keep the top 100
cand_tag = tra_tag_sp_T.dot(testi) # same scoring procedure for tags
cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]
cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]
rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
else:
cand_song = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]].songs.to_list():
for j in li:
cand_song.append(j)
cand_tag = []
for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]].tags.to_list():
for j in li:
cand_tag.append(j)
cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[:100].index)
rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10].index)
res2.append({
"id": test.loc[pid,'id'],
"songs": cand_song_idx,
"tags": rec_tag_idx
})
pd.DataFrame(res2)
rec2 = []
for i in range(len(res2)):
rec2.append({
"id": res2[i]['id'],
"songs": list(res2[i]['songs']),
"tags": res2[i]['tags']
})
result2 = pd.DataFrame(rec2)['songs']
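# Patch the five problem playlists' song lists with the k=50 results.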
n_index = [10498,6361,1960,8705,9310]
result2.index = n_index
result1.loc[n_index,'songs'] = result2
result1['songs'].apply(len).sort_values()
# Playlist 6361 still could not be filled, so fall back to the 100 globally most frequent training songs.
s = []
for song in train.songs.tolist():
s += song
r1 = dict(Counter(s))
r_song = sorted(r1.items(), key=lambda x: -x[1])
r_song_top = r_song[:100] # 100 fallback songs, matching the required list length
list_song = list(dict(r_song_top).keys())
len(list_song)
sub= []
for j in range(len(result1)) :
sub.append(result1.loc[j].to_dict())
sub[6361]['songs'] = list_song
pd.DataFrame(sub)['songs'].apply(len).sort_values()
write_json(sub,'final_songs.json')
return sub
if __name__ == '__main__':
_data = Dataset()
pre_tag.run(_data.test,_data.n_songs,_data.n_tags,_data.spr_list,_data.tag_tid_id)
final_tags = word2vec_for_tag.run(_data.total,_data.test)
final_songs = song_inference()
result = []
for f_songs, f_tags in zip(final_songs,final_tags):
result.append({
'id':f_songs['id'],
'songs':f_songs['songs'],
'tags':f_tags['tags']
})
write_json(result, 'results.json')
|
normal
|
{
"blob_id": "05573b4ff68ca8638f8e13946b410df2a012840a",
"index": 1829,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef song_inference():\n sp_total_model_path = 'sp_total'\n train = pd.read_json('./dataset/train.json', typ='frame', encoding='utf-8')\n song = pd.read_json('./dataset/song_meta.json', typ='frame', encoding=\n 'utf-8')\n plylst_tag = train['tags']\n tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])\n tag_dict = {x: tag_counter[x] for x in tag_counter}\n tag_id_tid = dict()\n tag_tid_id = dict()\n for i, t in enumerate(tag_dict):\n tag_id_tid[t] = i\n tag_tid_id[i] = t\n n_tags = len(tag_dict)\n plylst_song = train['songs']\n song_dict = {x: x for x in song['id']}\n n_songs = len(song_dict)\n train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n song_cate = []\n for i in range(len(train)):\n gnr = []\n songs = train.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n train['plylst_genre'] = song_cate\n plylst_genre = train['plylst_genre']\n genre_counter = Counter([gen for genre in plylst_genre for gen in genre])\n genre_dict = {x: genre_counter[x] for x in genre_counter}\n genre_id_tid = dict()\n genre_tid_id = dict()\n for i, t in enumerate(genre_dict):\n genre_id_tid[t] = i\n genre_tid_id[i] = t\n n_genre = len(genre_dict)\n train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n gnr_array = np.zeros((len(train), n_genre))\n for i, index in enumerate(train.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(train.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_array[i][k] = c\n gnr_array.shape\n song['issue_date'] = song['issue_date'].astype('str').map(lambda x: x[:6])\n plylst_use = train[['plylst_title', 'updt_date', 'tags_id', 'songs']]\n plylst_use.loc[:, 'num_songs'] = plylst_use['songs'].map(len)\n plylst_use.loc[:, 'num_tags'] = plylst_use['tags_id'].map(len)\n plylst_train = plylst_use\n n_train = len(plylst_train)\n row = np.repeat(range(n_train), plylst_train['num_songs'])\n col = [song for songs in plylst_train['songs'] for song in songs]\n dat = np.repeat(1, plylst_train['num_songs'].sum())\n train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,\n n_songs))\n row = np.repeat(range(n_train), plylst_train['num_tags'])\n col = [tag for tags in plylst_train['tags_id'] for tag in tags]\n dat = np.repeat(1, plylst_train['num_tags'].sum())\n train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,\n n_tags))\n train_user_songs_A_T = train_user_songs_A.T.tocsr()\n train_user_songs_A_T\n train_user_tags_A_T = train_user_tags_A.T.tocsr()\n train_user_tags_A_T\n val = pd.read_json('./dataset/val.json', typ='frame', encoding='utf-8')\n song_cate = []\n for i in range(len(val)):\n gnr = []\n songs = val.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n val['plylst_genre'] = song_cate\n val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n val.loc[:, 'num_songs'] = val['songs'].map(len)\n val.loc[:, 'num_tags'] = val['tags_id'].map(len)\n gnr_val = np.zeros((len(val), n_genre))\n for i, index in enumerate(val.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(val.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n 
gnr_val[i][k] = c\n gnr_val.shape\n n_val = len(val)\n row = np.repeat(range(n_val), val['num_songs'])\n col = [song for songs in val['songs'] for song in songs]\n dat = np.repeat(1, val['num_songs'].sum())\n val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)\n )\n row = np.repeat(range(n_val), val['num_tags'])\n col = [tag for tags in val['tags_id'] for tag in tags]\n dat = np.repeat(1, val['num_tags'].sum())\n val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))\n val_user_songs_A_T = val_user_songs_A.T.tocsr()\n val_user_tags_A_T = val_user_tags_A.T.tocsr()\n test = pd.read_json('./dataset/test.json', typ='frame', encoding='utf-8')\n song_cate = []\n for i in range(len(test)):\n gnr = []\n songs = test.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n test['plylst_genre'] = song_cate\n test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n test.loc[:, 'num_songs'] = test['songs'].map(len)\n test.loc[:, 'num_tags'] = test['tags_id'].map(len)\n gnr_test = np.zeros((len(test), n_genre))\n for i, index in enumerate(test.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(test.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_test[i][k] = c\n gnr_test.shape\n n_test = len(test)\n row = np.repeat(range(n_test), test['num_songs'])\n col = [song for songs in test['songs'] for song in songs]\n dat = np.repeat(1, test['num_songs'].sum())\n test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test,\n n_songs))\n row = np.repeat(range(n_test), test['num_tags'])\n col = [tag for tags in test['tags_id'] for tag in tags]\n dat = np.repeat(1, test['num_tags'].sum())\n test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags)\n )\n test_user_songs_A_T = test_user_songs_A.T.tocsr()\n test_user_tags_A_T = test_user_tags_A.T.tocsr()\n data_all = pd.concat([train, val, test])\n data_all.index = range(len(data_all))\n arts = song['artist_id_basket'].map(lambda x: x[0])\n arts = pd.DataFrame(arts)\n art_counts = arts['artist_id_basket'].value_counts().reset_index()\n art_counts.columns = ['artist_id_basket', 'counts']\n arts2 = pd.merge(arts, art_counts, how='left', on=['artist_id_basket'])\n song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]\n song_art = song_art[['artist_id_basket']]\n ART_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n ART = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n if j in song_art.index:\n for k in song_art.loc[j, 'artist_id_basket']:\n ART.append(k)\n ART_cate.append(ART)\n data_all['plylst_ARTIST'] = ART_cate\n plylst_ARTIST = data_all['plylst_ARTIST']\n ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in\n ARTIST])\n ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}\n ARTIST_id_tid = dict()\n ARTIST_tid_id = dict()\n for i, t in enumerate(ARTIST_dict):\n ARTIST_id_tid[t] = i\n ARTIST_tid_id[i] = t\n n_ARTIST = len(ARTIST_dict)\n data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x:\n [ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])\n ART_data_all = np.zeros((len(data_all), n_ARTIST))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = 
Counter(data_all.loc[index]['plylst_ARTIST_id'])\n for k, c in counter.items():\n ART_data_all[i][k] = c\n ART_data_all.shape\n ART_array = ART_data_all[:len(train)]\n ART_val = ART_data_all[len(train):len(train) + len(val)]\n ART_test = ART_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del ART_data_all\n ART_array = sparse.csr_matrix(ART_array)\n ART_val = sparse.csr_matrix(ART_val)\n ART_test = sparse.csr_matrix(ART_test)\n tim_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n tim = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n tim.append(song.loc[j, 'issue_date'])\n tim_cate.append(tim)\n data_all['plylst_times'] = tim_cate\n plylst_times = data_all['plylst_times']\n times_counter = Counter([tim for times in plylst_times for tim in times])\n times_dict = {x: times_counter[x] for x in times_counter}\n times_id_tid = dict()\n times_tid_id = dict()\n for i, t in enumerate(times_dict):\n times_id_tid[t] = i\n times_tid_id[i] = t\n n_times = len(times_dict)\n data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [\n times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])\n tim_data_all = np.zeros((len(data_all), n_times))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_times_id'])\n for k, c in counter.items():\n tim_data_all[i][k] = c\n tim_array = tim_data_all[:len(train)]\n tim_val = tim_data_all[len(train):len(train) + len(val)]\n tim_test = tim_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del tim_data_all\n tim_array = sparse.csr_matrix(tim_array)\n tim_val = sparse.csr_matrix(tim_val)\n tim_test = sparse.csr_matrix(tim_test)\n GEN_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n GEN = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n for k in song.loc[j, 'song_gn_gnr_basket']:\n GEN.append(k)\n GEN_cate.append(GEN)\n data_all['plylst_GENRE'] = GEN_cate\n plylst_GENRE = data_all['plylst_GENRE']\n GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])\n GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}\n GENRE_id_tid = dict()\n GENRE_tid_id = dict()\n for i, t in enumerate(GENRE_dict):\n GENRE_id_tid[t] = i\n GENRE_tid_id[i] = t\n n_GENRE = len(GENRE_dict)\n data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [\n GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])\n GEN_data_all = np.zeros((len(data_all), n_GENRE))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_GENRE_id'])\n for k, c in counter.items():\n GEN_data_all[i][k] = c\n GEN_array = GEN_data_all[:len(train)]\n GEN_val = GEN_data_all[len(train):len(train) + len(val)]\n GEN_test = GEN_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del GEN_data_all\n GEN_array = sparse.csr_matrix(GEN_array)\n GEN_val = sparse.csr_matrix(GEN_val)\n GEN_test = sparse.csr_matrix(GEN_test)\n content = data_all['plylst_title']\n if '{}.model'.format(sp_total_model_path) not in os.listdir():\n makeSentencepieceModel(data_all, sp_total_model_path)\n sp = SentencePieceProcessor()\n sp.Load('{}.model'.format(sp_total_model_path))\n cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)\n content = data_all['plylst_title']\n tdm = cv.fit_transform(content)\n title_tdm = tdm.toarray()\n title_tr = title_tdm[:len(train)]\n title_va = title_tdm[len(train):len(train) + len(val)]\n title_ts = 
title_tdm[len(train) + len(val):len(train) + len(val) + len(\n test)]\n title_gnr = np.concatenate((gnr_array, title_tr), axis=1)\n val_title_gnr = np.concatenate((gnr_val, title_va), axis=1)\n test_title_gnr = np.concatenate((gnr_test, title_ts), axis=1)\n title_sp = sparse.csr_matrix(title_tdm)\n title_gnr = sparse.csr_matrix(title_gnr)\n val_title_gnr = sparse.csr_matrix(val_title_gnr)\n test_title_gnr = sparse.csr_matrix(test_title_gnr)\n title_gnr = vstack([title_gnr, val_title_gnr, test_title_gnr])\n song_sp = vstack([train_user_songs_A, val_user_songs_A, test_user_songs_A])\n tag_sp = vstack([train_user_tags_A, val_user_tags_A, test_user_tags_A])\n times_sp = vstack([tim_array, tim_val, tim_test])\n GEN_sp = vstack([GEN_array, GEN_val, GEN_test])\n ART_sp = vstack([ART_array, ART_val, ART_test])\n model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=25, n_jobs=-1)\n model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=40, n_jobs=-1)\n model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_song25.fit(song_sp)\n model_knn_tag25.fit(tag_sp)\n model_knn_title25.fit(title_sp)\n model_knn_title_gnr25.fit(title_gnr)\n model_knn_times25.fit(times_sp)\n model_knn_GEN25.fit(GEN_sp)\n model_knn_ART25.fit(ART_sp)\n model_knn_song40.fit(song_sp)\n model_knn_tag40.fit(tag_sp)\n model_knn_title40.fit(title_sp)\n model_knn_title_gnr40.fit(title_gnr)\n model_knn_times40.fit(times_sp)\n model_knn_GEN40.fit(GEN_sp)\n model_knn_ART40.fit(ART_sp)\n train.loc[:, 'num_songs'] = train['songs'].map(len)\n train.loc[:, 'num_tags'] = train['tags_id'].map(len)\n data_all = pd.concat([train, val, test])\n data_all.index = range(len(data_all))\n res = []\n for i in tqdm_notebook(range(len(test))):\n data = test.iloc[i]\n pid = i\n if len(data['songs']) >= 2 and len(data['tags_id']) >= 2:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n pp = np.zeros((n_tags, 1))\n pp[data['tags_id']] = 1\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = 
data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(25), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_tag * test_title_genre * test_tim *\n test_GEN * test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(40), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_tag * test_title_genre * test_tim *\n test_GEN * test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n cand_all = pd.merge(cand1, cand2, how='outer', on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2\n cand_song_idx = list(cand_all.sort_values(by=['pred'],\n ascending=False)[:100]['index'])\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = 
[tag_tid_id[i] for i in cand_tag_idx]\n res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['songs']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(25), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_title_genre * test_tim * test_GEN *\n test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(40), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_title_genre * test_tim * test_GEN *\n test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n cand_all = pd.merge(cand1, cand2, how='outer', on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = 
(cand_all['0_x'] + cand_all['0_y']) / 2\n cand_song_idx = list(cand_all.sort_values(by=['pred'],\n ascending=False)[:100]['index'])\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['tags_id']) != 0:\n p = np.zeros((n_tags, 1))\n p[data['tags_id']] = 1\n tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_tag['num_songs'])\n col = [song for songs in tra_tag['songs'] for song in songs]\n dat = np.repeat(1, tra_tag['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(25), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n songs_already = data['songs']\n tags_already = data['tags_id']\n testi = cosine_similarity(tra_tag_sp, pp.T)\n if len(data['plylst_title']) != 0:\n tra_title_gnr = title_tdm[model_knn_title25.kneighbors(\n title_ts[i:i + 1])[1][0]]\n testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +\n 1])\n testi = testi * testi_title\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res.append({'id': test.loc[pid, 'id'], 'songs': list(\n cand_song_idx), 'tags': rec_tag_idx})\n else:\n cand_song = []\n for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i\n :i + 1])[1][0]].songs.to_list():\n for j in li:\n cand_song.append(j)\n cand_tag = []\n for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i\n :i + 1])[1][0]].tags.to_list():\n for j in li:\n cand_tag.append(j)\n cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[\n :100].index)\n rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10\n ].index)\n res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n for i in range(len(res)):\n if len(res[i]['songs']) != 100:\n print('song 에서 {}번째 오류 발생'.format(i))\n if len(res[i]['tags']) != 10:\n print('tag 에서 {}번째 오류 발생'.format(i))\n rec = []\n for i in range(len(res)):\n rec.append({'id': res[i]['id'], 'songs': list(res[i]['songs']),\n 'tags': res[i]['tags']})\n result1 = pd.DataFrame(rec)\n model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=50, n_jobs=-1)\n model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_ART = 
NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_song.fit(song_sp)\n model_knn_tag.fit(tag_sp)\n model_knn_title.fit(title_sp)\n model_knn_title_gnr.fit(title_gnr)\n model_knn_times.fit(times_sp)\n model_knn_GEN.fit(GEN_sp)\n model_knn_ART.fit(ART_sp)\n res2 = []\n for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):\n data = test.iloc[i]\n pid = i\n if len(data['songs']) != 0 and len(data['tags_id']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n pp = np.zeros((n_tags, 1))\n pp[data['tags_id']] = 1\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]\n )[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = test_song * test_tag * test_title_genre * test_GEN\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['songs']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(50), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n songs_already = data['songs']\n tags_already = data['tags_id']\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]\n )[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = 
cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = test_song * test_title_genre * test_tim * test_GEN\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['tags_id']) != 0:\n p = np.zeros((n_tags, 1))\n p[data['tags_id']] = 1\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_songs'])\n col = [song for songs in tra_tag['songs'] for song in songs]\n dat = np.repeat(1, tra_tag['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n songs_already = data['songs']\n tags_already = data['tags_id']\n testi = cosine_similarity(tra_tag_sp, pp.T)\n if len(data['plylst_title']) != 0:\n tra_title_gnr = title_tdm[model_knn_title.kneighbors(\n title_ts[i:i + 1])[1][0]]\n testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +\n 1])\n testi = testi * testi_title\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n else:\n cand_song = []\n for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +\n 1])[1][0]].songs.to_list():\n for j in li:\n cand_song.append(j)\n cand_tag = []\n for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +\n 1])[1][0]].tags.to_list():\n for j in li:\n cand_tag.append(j)\n cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[\n :100].index)\n rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10\n ].index)\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n pd.DataFrame(res2)\n rec2 = []\n for i in range(len(res2)):\n rec2.append({'id': res2[i]['id'], 'songs': list(res2[i]['songs']),\n 'tags': res2[i]['tags']})\n result2 = pd.DataFrame(rec2)['songs']\n n_index = [10498, 6361, 1960, 8705, 9310]\n result2.index = n_index\n result1.loc[n_index, 'songs'] = result2\n result1['songs'].apply(len).sort_values()\n s = []\n for song in train.songs.tolist():\n s += song\n r1 = dict(Counter(s))\n r_song = sorted(r1.items(), key=lambda x: -x[1])\n r_song_top = r_song[:100]\n list_song = list(dict(r_song_top).keys())\n len(list_song)\n sub = []\n for j in range(len(result1)):\n sub.append(result1.loc[j].to_dict())\n sub[6361]['songs'] = list_song\n pd.DataFrame(sub)['songs'].apply(len).sort_values()\n 
write_json(sub, 'final_songs.json')\n return sub\n\n\n<mask token>\n",
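The candidate-generation pattern repeated in every step above is a cosine KNN over sparse playlist vectors: encode each playlist as a bag-of-items row in a CSR matrix, look up its nearest training playlists, weight each neighbor by its cosine similarity to the query, and sum the neighbors' item rows. A minimal sketch of that core loop, using toy playlists and hypothetical ids rather than the real dataset:

import numpy as np
import scipy.sparse as spr
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity

# toy corpus: 3 playlists over 6 songs (ids are made up)
playlists = [[0, 2, 5], [1, 2], [3, 4, 5]]
n_songs = 6
row = np.repeat(range(len(playlists)), [len(p) for p in playlists])
col = [s for songs in playlists for s in songs]
dat = np.repeat(1, len(col))
A = spr.csr_matrix((dat, (row, col)), shape=(len(playlists), n_songs))

knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=2).fit(A)
q = np.zeros((n_songs, 1))
q[[2, 5]] = 1                        # query playlist as a one-hot song vector
nbrs = knn.kneighbors(q.T)[1][0]     # row indices of the nearest training playlists
w = cosine_similarity(A[nbrs], q.T)  # similarity of each neighbor to the query, shape (k, 1)
scores = A[nbrs].T.dot(w).ravel()    # song scores: similarity-weighted occurrence counts
rec = [s for s in scores.argsort()[::-1] if s not in {2, 5}]  # drop songs already present
print(rec)

In the dumped code this weighted sum is tra_song_sp_T.dot(testi), where testi is the elementwise product of several such similarity terms (songs, tags, title tokens, genre, artist, issue date), so a neighbor must match on every signal to score highly.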
"step-3": "<mask token>\n\n\ndef song_inference():\n sp_total_model_path = 'sp_total'\n train = pd.read_json('./dataset/train.json', typ='frame', encoding='utf-8')\n song = pd.read_json('./dataset/song_meta.json', typ='frame', encoding=\n 'utf-8')\n plylst_tag = train['tags']\n tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])\n tag_dict = {x: tag_counter[x] for x in tag_counter}\n tag_id_tid = dict()\n tag_tid_id = dict()\n for i, t in enumerate(tag_dict):\n tag_id_tid[t] = i\n tag_tid_id[i] = t\n n_tags = len(tag_dict)\n plylst_song = train['songs']\n song_dict = {x: x for x in song['id']}\n n_songs = len(song_dict)\n train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n song_cate = []\n for i in range(len(train)):\n gnr = []\n songs = train.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n train['plylst_genre'] = song_cate\n plylst_genre = train['plylst_genre']\n genre_counter = Counter([gen for genre in plylst_genre for gen in genre])\n genre_dict = {x: genre_counter[x] for x in genre_counter}\n genre_id_tid = dict()\n genre_tid_id = dict()\n for i, t in enumerate(genre_dict):\n genre_id_tid[t] = i\n genre_tid_id[i] = t\n n_genre = len(genre_dict)\n train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n gnr_array = np.zeros((len(train), n_genre))\n for i, index in enumerate(train.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(train.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_array[i][k] = c\n gnr_array.shape\n song['issue_date'] = song['issue_date'].astype('str').map(lambda x: x[:6])\n plylst_use = train[['plylst_title', 'updt_date', 'tags_id', 'songs']]\n plylst_use.loc[:, 'num_songs'] = plylst_use['songs'].map(len)\n plylst_use.loc[:, 'num_tags'] = plylst_use['tags_id'].map(len)\n plylst_train = plylst_use\n n_train = len(plylst_train)\n row = np.repeat(range(n_train), plylst_train['num_songs'])\n col = [song for songs in plylst_train['songs'] for song in songs]\n dat = np.repeat(1, plylst_train['num_songs'].sum())\n train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,\n n_songs))\n row = np.repeat(range(n_train), plylst_train['num_tags'])\n col = [tag for tags in plylst_train['tags_id'] for tag in tags]\n dat = np.repeat(1, plylst_train['num_tags'].sum())\n train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,\n n_tags))\n train_user_songs_A_T = train_user_songs_A.T.tocsr()\n train_user_songs_A_T\n train_user_tags_A_T = train_user_tags_A.T.tocsr()\n train_user_tags_A_T\n val = pd.read_json('./dataset/val.json', typ='frame', encoding='utf-8')\n song_cate = []\n for i in range(len(val)):\n gnr = []\n songs = val.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n val['plylst_genre'] = song_cate\n val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n val.loc[:, 'num_songs'] = val['songs'].map(len)\n val.loc[:, 'num_tags'] = val['tags_id'].map(len)\n gnr_val = np.zeros((len(val), n_genre))\n for i, index in enumerate(val.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(val.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n 
gnr_val[i][k] = c\n gnr_val.shape\n n_val = len(val)\n row = np.repeat(range(n_val), val['num_songs'])\n col = [song for songs in val['songs'] for song in songs]\n dat = np.repeat(1, val['num_songs'].sum())\n val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)\n )\n row = np.repeat(range(n_val), val['num_tags'])\n col = [tag for tags in val['tags_id'] for tag in tags]\n dat = np.repeat(1, val['num_tags'].sum())\n val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))\n val_user_songs_A_T = val_user_songs_A.T.tocsr()\n val_user_tags_A_T = val_user_tags_A.T.tocsr()\n test = pd.read_json('./dataset/test.json', typ='frame', encoding='utf-8')\n song_cate = []\n for i in range(len(test)):\n gnr = []\n songs = test.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n test['plylst_genre'] = song_cate\n test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n test.loc[:, 'num_songs'] = test['songs'].map(len)\n test.loc[:, 'num_tags'] = test['tags_id'].map(len)\n gnr_test = np.zeros((len(test), n_genre))\n for i, index in enumerate(test.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(test.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_test[i][k] = c\n gnr_test.shape\n n_test = len(test)\n row = np.repeat(range(n_test), test['num_songs'])\n col = [song for songs in test['songs'] for song in songs]\n dat = np.repeat(1, test['num_songs'].sum())\n test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test,\n n_songs))\n row = np.repeat(range(n_test), test['num_tags'])\n col = [tag for tags in test['tags_id'] for tag in tags]\n dat = np.repeat(1, test['num_tags'].sum())\n test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags)\n )\n test_user_songs_A_T = test_user_songs_A.T.tocsr()\n test_user_tags_A_T = test_user_tags_A.T.tocsr()\n data_all = pd.concat([train, val, test])\n data_all.index = range(len(data_all))\n arts = song['artist_id_basket'].map(lambda x: x[0])\n arts = pd.DataFrame(arts)\n art_counts = arts['artist_id_basket'].value_counts().reset_index()\n art_counts.columns = ['artist_id_basket', 'counts']\n arts2 = pd.merge(arts, art_counts, how='left', on=['artist_id_basket'])\n song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]\n song_art = song_art[['artist_id_basket']]\n ART_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n ART = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n if j in song_art.index:\n for k in song_art.loc[j, 'artist_id_basket']:\n ART.append(k)\n ART_cate.append(ART)\n data_all['plylst_ARTIST'] = ART_cate\n plylst_ARTIST = data_all['plylst_ARTIST']\n ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in\n ARTIST])\n ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}\n ARTIST_id_tid = dict()\n ARTIST_tid_id = dict()\n for i, t in enumerate(ARTIST_dict):\n ARTIST_id_tid[t] = i\n ARTIST_tid_id[i] = t\n n_ARTIST = len(ARTIST_dict)\n data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x:\n [ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])\n ART_data_all = np.zeros((len(data_all), n_ARTIST))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = 
Counter(data_all.loc[index]['plylst_ARTIST_id'])\n for k, c in counter.items():\n ART_data_all[i][k] = c\n ART_data_all.shape\n ART_array = ART_data_all[:len(train)]\n ART_val = ART_data_all[len(train):len(train) + len(val)]\n ART_test = ART_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del ART_data_all\n ART_array = sparse.csr_matrix(ART_array)\n ART_val = sparse.csr_matrix(ART_val)\n ART_test = sparse.csr_matrix(ART_test)\n tim_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n tim = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n tim.append(song.loc[j, 'issue_date'])\n tim_cate.append(tim)\n data_all['plylst_times'] = tim_cate\n plylst_times = data_all['plylst_times']\n times_counter = Counter([tim for times in plylst_times for tim in times])\n times_dict = {x: times_counter[x] for x in times_counter}\n times_id_tid = dict()\n times_tid_id = dict()\n for i, t in enumerate(times_dict):\n times_id_tid[t] = i\n times_tid_id[i] = t\n n_times = len(times_dict)\n data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [\n times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])\n tim_data_all = np.zeros((len(data_all), n_times))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_times_id'])\n for k, c in counter.items():\n tim_data_all[i][k] = c\n tim_array = tim_data_all[:len(train)]\n tim_val = tim_data_all[len(train):len(train) + len(val)]\n tim_test = tim_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del tim_data_all\n tim_array = sparse.csr_matrix(tim_array)\n tim_val = sparse.csr_matrix(tim_val)\n tim_test = sparse.csr_matrix(tim_test)\n GEN_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n GEN = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n for k in song.loc[j, 'song_gn_gnr_basket']:\n GEN.append(k)\n GEN_cate.append(GEN)\n data_all['plylst_GENRE'] = GEN_cate\n plylst_GENRE = data_all['plylst_GENRE']\n GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])\n GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}\n GENRE_id_tid = dict()\n GENRE_tid_id = dict()\n for i, t in enumerate(GENRE_dict):\n GENRE_id_tid[t] = i\n GENRE_tid_id[i] = t\n n_GENRE = len(GENRE_dict)\n data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [\n GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])\n GEN_data_all = np.zeros((len(data_all), n_GENRE))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_GENRE_id'])\n for k, c in counter.items():\n GEN_data_all[i][k] = c\n GEN_array = GEN_data_all[:len(train)]\n GEN_val = GEN_data_all[len(train):len(train) + len(val)]\n GEN_test = GEN_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del GEN_data_all\n GEN_array = sparse.csr_matrix(GEN_array)\n GEN_val = sparse.csr_matrix(GEN_val)\n GEN_test = sparse.csr_matrix(GEN_test)\n content = data_all['plylst_title']\n if '{}.model'.format(sp_total_model_path) not in os.listdir():\n makeSentencepieceModel(data_all, sp_total_model_path)\n sp = SentencePieceProcessor()\n sp.Load('{}.model'.format(sp_total_model_path))\n cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)\n content = data_all['plylst_title']\n tdm = cv.fit_transform(content)\n title_tdm = tdm.toarray()\n title_tr = title_tdm[:len(train)]\n title_va = title_tdm[len(train):len(train) + len(val)]\n title_ts = 
title_tdm[len(train) + len(val):len(train) + len(val) + len(\n test)]\n title_gnr = np.concatenate((gnr_array, title_tr), axis=1)\n val_title_gnr = np.concatenate((gnr_val, title_va), axis=1)\n test_title_gnr = np.concatenate((gnr_test, title_ts), axis=1)\n title_sp = sparse.csr_matrix(title_tdm)\n title_gnr = sparse.csr_matrix(title_gnr)\n val_title_gnr = sparse.csr_matrix(val_title_gnr)\n test_title_gnr = sparse.csr_matrix(test_title_gnr)\n title_gnr = vstack([title_gnr, val_title_gnr, test_title_gnr])\n song_sp = vstack([train_user_songs_A, val_user_songs_A, test_user_songs_A])\n tag_sp = vstack([train_user_tags_A, val_user_tags_A, test_user_tags_A])\n times_sp = vstack([tim_array, tim_val, tim_test])\n GEN_sp = vstack([GEN_array, GEN_val, GEN_test])\n ART_sp = vstack([ART_array, ART_val, ART_test])\n model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=25, n_jobs=-1)\n model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=40, n_jobs=-1)\n model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_song25.fit(song_sp)\n model_knn_tag25.fit(tag_sp)\n model_knn_title25.fit(title_sp)\n model_knn_title_gnr25.fit(title_gnr)\n model_knn_times25.fit(times_sp)\n model_knn_GEN25.fit(GEN_sp)\n model_knn_ART25.fit(ART_sp)\n model_knn_song40.fit(song_sp)\n model_knn_tag40.fit(tag_sp)\n model_knn_title40.fit(title_sp)\n model_knn_title_gnr40.fit(title_gnr)\n model_knn_times40.fit(times_sp)\n model_knn_GEN40.fit(GEN_sp)\n model_knn_ART40.fit(ART_sp)\n train.loc[:, 'num_songs'] = train['songs'].map(len)\n train.loc[:, 'num_tags'] = train['tags_id'].map(len)\n data_all = pd.concat([train, val, test])\n data_all.index = range(len(data_all))\n res = []\n for i in tqdm_notebook(range(len(test))):\n data = test.iloc[i]\n pid = i\n if len(data['songs']) >= 2 and len(data['tags_id']) >= 2:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n pp = np.zeros((n_tags, 1))\n pp[data['tags_id']] = 1\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = 
data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(25), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_tag * test_title_genre * test_tim *\n test_GEN * test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(40), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_tag * test_title_genre * test_tim *\n test_GEN * test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n cand_all = pd.merge(cand1, cand2, how='outer', on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2\n cand_song_idx = list(cand_all.sort_values(by=['pred'],\n ascending=False)[:100]['index'])\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = 
[tag_tid_id[i] for i in cand_tag_idx]\n res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['songs']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(25), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_title_genre * test_tim * test_GEN *\n test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(40), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_title_genre * test_tim * test_GEN *\n test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n cand_all = pd.merge(cand1, cand2, how='outer', on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = 
(cand_all['0_x'] + cand_all['0_y']) / 2\n            cand_song_idx = list(cand_all.sort_values(by=['pred'],\n                ascending=False)[:100]['index'])\n            cand_tag = tra_tag_sp_T.dot(testi)\n            cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n            cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n                False][:10]\n            rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n            res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n        elif len(data['tags_id']) != 0:\n            p = np.zeros((n_tags, 1))\n            p[data['tags_id']] = 1\n            tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]\n            row = np.repeat(range(25), tra_tag['num_songs'])\n            col = [song for songs in tra_tag['songs'] for song in songs]\n            dat = np.repeat(1, tra_tag['num_songs'].sum())\n            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n                )\n            tra_song_sp_T = tra_song_sp.T.tocsr()\n            row = np.repeat(range(25), tra_tag['num_tags'])\n            col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n            dat = np.repeat(1, tra_tag['num_tags'].sum())\n            tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n            tra_tag_sp_T = tra_tag_sp.T.tocsr()\n            songs_already = data['songs']\n            tags_already = data['tags_id']\n            testi = cosine_similarity(tra_tag_sp, p.T)\n            if len(data['plylst_title']) != 0:\n                tra_title_gnr = title_tdm[model_knn_title25.kneighbors(\n                    title_ts[i:i + 1])[1][0]]\n                testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +\n                    1])\n                testi = testi * testi_title\n            cand_song = tra_song_sp_T.dot(testi)\n            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n            cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n                songs_already) == False][:100]\n            cand_tag = tra_tag_sp_T.dot(testi)\n            cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n            cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n                False][:10]\n            rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n            res.append({'id': test.loc[pid, 'id'], 'songs': list(\n                cand_song_idx), 'tags': rec_tag_idx})\n        else:\n            cand_song = []\n            for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i\n                :i + 1])[1][0]].songs.to_list():\n                for j in li:\n                    cand_song.append(j)\n            cand_tag = []\n            for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i\n                :i + 1])[1][0]].tags.to_list():\n                for j in li:\n                    cand_tag.append(j)\n            cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[\n                :100].index)\n            rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10\n                ].index)\n            res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n    for i in range(len(res)):\n        if len(res[i]['songs']) != 100:\n            print('song error at index {}'.format(i))\n        if len(res[i]['tags']) != 10:\n            print('tag error at index {}'.format(i))\n    rec = []\n    for i in range(len(res)):\n        rec.append({'id': res[i]['id'], 'songs': list(res[i]['songs']),\n            'tags': res[i]['tags']})\n    result1 = pd.DataFrame(rec)\n    model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute',\n        n_neighbors=50, n_jobs=-1)\n    model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute',\n        n_neighbors=50, n_jobs=-1)\n    model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute',\n        n_neighbors=50, n_jobs=-1)\n    model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm=\n        'brute', n_neighbors=50, n_jobs=-1)\n    model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute',\n        n_neighbors=50, n_jobs=-1)\n    model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute',\n        n_neighbors=50, n_jobs=-1)\n    model_knn_ART = 
NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_song.fit(song_sp)\n model_knn_tag.fit(tag_sp)\n model_knn_title.fit(title_sp)\n model_knn_title_gnr.fit(title_gnr)\n model_knn_times.fit(times_sp)\n model_knn_GEN.fit(GEN_sp)\n model_knn_ART.fit(ART_sp)\n res2 = []\n for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):\n data = test.iloc[i]\n pid = i\n if len(data['songs']) != 0 and len(data['tags_id']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n pp = np.zeros((n_tags, 1))\n pp[data['tags_id']] = 1\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]\n )[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = test_song * test_tag * test_title_genre * test_GEN\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['songs']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(50), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n songs_already = data['songs']\n tags_already = data['tags_id']\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]\n )[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = 
cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n            test_title_genre = cosine_similarity(tra_title_gnr,\n                test_title_gnr[i:i + 1])\n            testi = test_song * test_title_genre * test_tim * test_GEN\n            cand_song = tra_song_sp_T.dot(testi)\n            cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1]\n            cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n                songs_already) == False][:100]\n            cand_tag = tra_tag_sp_T.dot(testi)\n            cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n            cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n                False][:10]\n            rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n            res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n        elif len(data['tags_id']) != 0:\n            p = np.zeros((n_tags, 1))\n            p[data['tags_id']] = 1\n            tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]\n            row = np.repeat(range(50), tra_tag['num_songs'])\n            col = [song for songs in tra_tag['songs'] for song in songs]\n            dat = np.repeat(1, tra_tag['num_songs'].sum())\n            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n                )\n            tra_song_sp_T = tra_song_sp.T.tocsr()\n            row = np.repeat(range(50), tra_tag['num_tags'])\n            col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n            dat = np.repeat(1, tra_tag['num_tags'].sum())\n            tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n            tra_tag_sp_T = tra_tag_sp.T.tocsr()\n            songs_already = data['songs']\n            tags_already = data['tags_id']\n            testi = cosine_similarity(tra_tag_sp, p.T)\n            if len(data['plylst_title']) != 0:\n                tra_title_gnr = title_tdm[model_knn_title.kneighbors(\n                    title_ts[i:i + 1])[1][0]]\n                testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +\n                    1])\n                testi = testi * testi_title\n            cand_song = tra_song_sp_T.dot(testi)\n            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n            cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n                songs_already) == False][:100]\n            cand_tag = tra_tag_sp_T.dot(testi)\n            cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n            cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n                False][:10]\n            rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n            res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n        else:\n            cand_song = []\n            for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +\n                1])[1][0]].songs.to_list():\n                for j in li:\n                    cand_song.append(j)\n            cand_tag = []\n            for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +\n                1])[1][0]].tags.to_list():\n                for j in li:\n                    cand_tag.append(j)\n            cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[\n                :100].index)\n            rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10\n                ].index)\n            res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n    pd.DataFrame(res2)\n    rec2 = []\n    for i in range(len(res2)):\n        rec2.append({'id': res2[i]['id'], 'songs': list(res2[i]['songs']),\n            'tags': res2[i]['tags']})\n    result2 = pd.DataFrame(rec2)['songs']\n    n_index = [10498, 6361, 1960, 8705, 9310]\n    result2.index = n_index\n    result1.loc[n_index, 'songs'] = result2\n    result1['songs'].apply(len).sort_values()\n    s = []\n    for song in train.songs.tolist():\n        s += song\n    r1 = dict(Counter(s))\n    r_song = sorted(r1.items(), key=lambda x: -x[1])\n    r_song_top = r_song[:100]\n    list_song = list(dict(r_song_top).keys())\n    len(list_song)\n    sub = []\n    for j in range(len(result1)):\n        sub.append(result1.loc[j].to_dict())\n    sub[6361]['songs'] = list_song\n    pd.DataFrame(sub)['songs'].apply(len).sort_values()\n    
write_json(sub, 'final_songs.json')\n return sub\n\n\nif __name__ == '__main__':\n _data = Dataset()\n pre_tag.run(_data.test, _data.n_songs, _data.n_tags, _data.spr_list,\n _data.tag_tid_id)\n final_tags = word2vec_for_tag.run(_data.total, _data.test)\n final_songs = song_inference()\n result = []\n for f_songs, f_tags in zip(final_songs, final_tags):\n result.append({'id': f_songs['id'], 'songs': f_songs['songs'],\n 'tags': f_tags['tags']})\n write_json(result, 'results.json')\n",
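step-3 and step-4 blend two rankings computed with different neighborhood sizes (k=25 and k=40): each run's scores are wrapped in a DataFrame whose single column is the integer 0, outer-joined on 'index', zero-filled, and averaged, relying on pandas to suffix the duplicated column into '0_x'/'0_y'. A minimal sketch of that merge with made-up scores:

import pandas as pd

# hypothetical per-song scores from the k=25 and k=40 runs (same song vocabulary)
s25 = pd.DataFrame([0.9, 0.4, 0.0, 0.7]).reset_index()  # columns: 'index', 0
s40 = pd.DataFrame([0.8, 0.0, 0.3, 0.9]).reset_index()
both = pd.merge(s25, s40, how='outer', on='index').fillna(0)
both['pred'] = (both['0_x'] + both['0_y']) / 2          # simple average of the two scores
top = list(both.sort_values(by=['pred'], ascending=False)['index'][:2])
print(top)  # -> [0, 3]

Averaging after fillna(0) halves the score of a song proposed by only one run, so candidates that both neighborhoods agree on float to the top.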
"step-4": "from datetime import timedelta, datetime\nimport glob\nimport json\nimport os\nimport re\nimport pickle\nimport os, time\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\nfrom sentencepiece import SentencePieceTrainer\nfrom sentencepiece import SentencePieceProcessor\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.sparse import vstack\nfrom scipy import sparse\nimport scipy.sparse as spr\nfrom scipy.sparse import vstack\nfrom scipy import sparse\nfrom util import write_json, makeSentencepieceModel\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom tqdm import tqdm_notebook\nfrom sklearn.neighbors import NearestNeighbors\nfrom Dataset import Dataset\nimport pre_tag, word2vec_for_tag\n\n\ndef song_inference():\n sp_total_model_path = 'sp_total'\n train = pd.read_json('./dataset/train.json', typ='frame', encoding='utf-8')\n song = pd.read_json('./dataset/song_meta.json', typ='frame', encoding=\n 'utf-8')\n plylst_tag = train['tags']\n tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])\n tag_dict = {x: tag_counter[x] for x in tag_counter}\n tag_id_tid = dict()\n tag_tid_id = dict()\n for i, t in enumerate(tag_dict):\n tag_id_tid[t] = i\n tag_tid_id[i] = t\n n_tags = len(tag_dict)\n plylst_song = train['songs']\n song_dict = {x: x for x in song['id']}\n n_songs = len(song_dict)\n train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n song_cate = []\n for i in range(len(train)):\n gnr = []\n songs = train.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n train['plylst_genre'] = song_cate\n plylst_genre = train['plylst_genre']\n genre_counter = Counter([gen for genre in plylst_genre for gen in genre])\n genre_dict = {x: genre_counter[x] for x in genre_counter}\n genre_id_tid = dict()\n genre_tid_id = dict()\n for i, t in enumerate(genre_dict):\n genre_id_tid[t] = i\n genre_tid_id[i] = t\n n_genre = len(genre_dict)\n train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n gnr_array = np.zeros((len(train), n_genre))\n for i, index in enumerate(train.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(train.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_array[i][k] = c\n gnr_array.shape\n song['issue_date'] = song['issue_date'].astype('str').map(lambda x: x[:6])\n plylst_use = train[['plylst_title', 'updt_date', 'tags_id', 'songs']]\n plylst_use.loc[:, 'num_songs'] = plylst_use['songs'].map(len)\n plylst_use.loc[:, 'num_tags'] = plylst_use['tags_id'].map(len)\n plylst_train = plylst_use\n n_train = len(plylst_train)\n row = np.repeat(range(n_train), plylst_train['num_songs'])\n col = [song for songs in plylst_train['songs'] for song in songs]\n dat = np.repeat(1, plylst_train['num_songs'].sum())\n train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,\n n_songs))\n row = np.repeat(range(n_train), plylst_train['num_tags'])\n col = [tag for tags in plylst_train['tags_id'] for tag in tags]\n dat = np.repeat(1, plylst_train['num_tags'].sum())\n train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train,\n n_tags))\n train_user_songs_A_T = train_user_songs_A.T.tocsr()\n train_user_songs_A_T\n train_user_tags_A_T = train_user_tags_A.T.tocsr()\n train_user_tags_A_T\n val = pd.read_json('./dataset/val.json', 
typ='frame', encoding='utf-8')\n song_cate = []\n for i in range(len(val)):\n gnr = []\n songs = val.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n val['plylst_genre'] = song_cate\n val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n val.loc[:, 'num_songs'] = val['songs'].map(len)\n val.loc[:, 'num_tags'] = val['tags_id'].map(len)\n gnr_val = np.zeros((len(val), n_genre))\n for i, index in enumerate(val.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(val.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_val[i][k] = c\n gnr_val.shape\n n_val = len(val)\n row = np.repeat(range(n_val), val['num_songs'])\n col = [song for songs in val['songs'] for song in songs]\n dat = np.repeat(1, val['num_songs'].sum())\n val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)\n )\n row = np.repeat(range(n_val), val['num_tags'])\n col = [tag for tags in val['tags_id'] for tag in tags]\n dat = np.repeat(1, val['num_tags'].sum())\n val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))\n val_user_songs_A_T = val_user_songs_A.T.tocsr()\n val_user_tags_A_T = val_user_tags_A.T.tocsr()\n test = pd.read_json('./dataset/test.json', typ='frame', encoding='utf-8')\n song_cate = []\n for i in range(len(test)):\n gnr = []\n songs = test.iloc[i, 3]\n for j in songs:\n for k in song.loc[j, 'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n test['plylst_genre'] = song_cate\n test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in\n x if tag_id_tid.get(t) != None])\n test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [\n genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n test.loc[:, 'num_songs'] = test['songs'].map(len)\n test.loc[:, 'num_tags'] = test['tags_id'].map(len)\n gnr_test = np.zeros((len(test), n_genre))\n for i, index in enumerate(test.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(test.loc[index]['plylst_genre_id'])\n for k, c in counter.items():\n gnr_test[i][k] = c\n gnr_test.shape\n n_test = len(test)\n row = np.repeat(range(n_test), test['num_songs'])\n col = [song for songs in test['songs'] for song in songs]\n dat = np.repeat(1, test['num_songs'].sum())\n test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test,\n n_songs))\n row = np.repeat(range(n_test), test['num_tags'])\n col = [tag for tags in test['tags_id'] for tag in tags]\n dat = np.repeat(1, test['num_tags'].sum())\n test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags)\n )\n test_user_songs_A_T = test_user_songs_A.T.tocsr()\n test_user_tags_A_T = test_user_tags_A.T.tocsr()\n data_all = pd.concat([train, val, test])\n data_all.index = range(len(data_all))\n arts = song['artist_id_basket'].map(lambda x: x[0])\n arts = pd.DataFrame(arts)\n art_counts = arts['artist_id_basket'].value_counts().reset_index()\n art_counts.columns = ['artist_id_basket', 'counts']\n arts2 = pd.merge(arts, art_counts, how='left', on=['artist_id_basket'])\n song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]\n song_art = song_art[['artist_id_basket']]\n ART_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n ART = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n if j in song_art.index:\n for k in 
song_art.loc[j, 'artist_id_basket']:\n ART.append(k)\n ART_cate.append(ART)\n data_all['plylst_ARTIST'] = ART_cate\n plylst_ARTIST = data_all['plylst_ARTIST']\n ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in\n ARTIST])\n ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}\n ARTIST_id_tid = dict()\n ARTIST_tid_id = dict()\n for i, t in enumerate(ARTIST_dict):\n ARTIST_id_tid[t] = i\n ARTIST_tid_id[i] = t\n n_ARTIST = len(ARTIST_dict)\n data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x:\n [ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])\n ART_data_all = np.zeros((len(data_all), n_ARTIST))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])\n for k, c in counter.items():\n ART_data_all[i][k] = c\n ART_data_all.shape\n ART_array = ART_data_all[:len(train)]\n ART_val = ART_data_all[len(train):len(train) + len(val)]\n ART_test = ART_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del ART_data_all\n ART_array = sparse.csr_matrix(ART_array)\n ART_val = sparse.csr_matrix(ART_val)\n ART_test = sparse.csr_matrix(ART_test)\n tim_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n tim = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n tim.append(song.loc[j, 'issue_date'])\n tim_cate.append(tim)\n data_all['plylst_times'] = tim_cate\n plylst_times = data_all['plylst_times']\n times_counter = Counter([tim for times in plylst_times for tim in times])\n times_dict = {x: times_counter[x] for x in times_counter}\n times_id_tid = dict()\n times_tid_id = dict()\n for i, t in enumerate(times_dict):\n times_id_tid[t] = i\n times_tid_id[i] = t\n n_times = len(times_dict)\n data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [\n times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])\n tim_data_all = np.zeros((len(data_all), n_times))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_times_id'])\n for k, c in counter.items():\n tim_data_all[i][k] = c\n tim_array = tim_data_all[:len(train)]\n tim_val = tim_data_all[len(train):len(train) + len(val)]\n tim_test = tim_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del tim_data_all\n tim_array = sparse.csr_matrix(tim_array)\n tim_val = sparse.csr_matrix(tim_val)\n tim_test = sparse.csr_matrix(tim_test)\n GEN_cate = []\n for i in tqdm_notebook(range(len(data_all))):\n GEN = []\n songs = data_all.loc[i, 'songs']\n for j in songs:\n for k in song.loc[j, 'song_gn_gnr_basket']:\n GEN.append(k)\n GEN_cate.append(GEN)\n data_all['plylst_GENRE'] = GEN_cate\n plylst_GENRE = data_all['plylst_GENRE']\n GENRE_counter = Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])\n GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}\n GENRE_id_tid = dict()\n GENRE_tid_id = dict()\n for i, t in enumerate(GENRE_dict):\n GENRE_id_tid[t] = i\n GENRE_tid_id[i] = t\n n_GENRE = len(GENRE_dict)\n data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [\n GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])\n GEN_data_all = np.zeros((len(data_all), n_GENRE))\n for i, index in enumerate(data_all.index):\n if i % 10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_GENRE_id'])\n for k, c in counter.items():\n GEN_data_all[i][k] = c\n GEN_array = GEN_data_all[:len(train)]\n GEN_val = GEN_data_all[len(train):len(train) + len(val)]\n 
GEN_test = GEN_data_all[len(train) + len(val):len(train) + len(val) +\n len(test)]\n del GEN_data_all\n GEN_array = sparse.csr_matrix(GEN_array)\n GEN_val = sparse.csr_matrix(GEN_val)\n GEN_test = sparse.csr_matrix(GEN_test)\n content = data_all['plylst_title']\n if '{}.model'.format(sp_total_model_path) not in os.listdir():\n makeSentencepieceModel(data_all, sp_total_model_path)\n sp = SentencePieceProcessor()\n sp.Load('{}.model'.format(sp_total_model_path))\n cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)\n content = data_all['plylst_title']\n tdm = cv.fit_transform(content)\n title_tdm = tdm.toarray()\n title_tr = title_tdm[:len(train)]\n title_va = title_tdm[len(train):len(train) + len(val)]\n title_ts = title_tdm[len(train) + len(val):len(train) + len(val) + len(\n test)]\n title_gnr = np.concatenate((gnr_array, title_tr), axis=1)\n val_title_gnr = np.concatenate((gnr_val, title_va), axis=1)\n test_title_gnr = np.concatenate((gnr_test, title_ts), axis=1)\n title_sp = sparse.csr_matrix(title_tdm)\n title_gnr = sparse.csr_matrix(title_gnr)\n val_title_gnr = sparse.csr_matrix(val_title_gnr)\n test_title_gnr = sparse.csr_matrix(test_title_gnr)\n title_gnr = vstack([title_gnr, val_title_gnr, test_title_gnr])\n song_sp = vstack([train_user_songs_A, val_user_songs_A, test_user_songs_A])\n tag_sp = vstack([train_user_tags_A, val_user_tags_A, test_user_tags_A])\n times_sp = vstack([tim_array, tim_val, tim_test])\n GEN_sp = vstack([GEN_array, GEN_val, GEN_test])\n ART_sp = vstack([ART_array, ART_val, ART_test])\n model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=25, n_jobs=-1)\n model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=25, n_jobs=-1)\n model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=40, n_jobs=-1)\n model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=40, n_jobs=-1)\n model_knn_song25.fit(song_sp)\n model_knn_tag25.fit(tag_sp)\n model_knn_title25.fit(title_sp)\n model_knn_title_gnr25.fit(title_gnr)\n model_knn_times25.fit(times_sp)\n model_knn_GEN25.fit(GEN_sp)\n model_knn_ART25.fit(ART_sp)\n model_knn_song40.fit(song_sp)\n model_knn_tag40.fit(tag_sp)\n model_knn_title40.fit(title_sp)\n model_knn_title_gnr40.fit(title_gnr)\n model_knn_times40.fit(times_sp)\n model_knn_GEN40.fit(GEN_sp)\n model_knn_ART40.fit(ART_sp)\n train.loc[:, 'num_songs'] = train['songs'].map(len)\n train.loc[:, 'num_tags'] = 
train['tags_id'].map(len)\n data_all = pd.concat([train, val, test])\n data_all.index = range(len(data_all))\n res = []\n for i in tqdm_notebook(range(len(test))):\n data = test.iloc[i]\n pid = i\n if len(data['songs']) >= 2 and len(data['tags_id']) >= 2:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n pp = np.zeros((n_tags, 1))\n pp[data['tags_id']] = 1\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(25), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_tag * test_title_genre * test_tim *\n test_GEN * test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(40), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_tag * test_title_genre * test_tim *\n 
test_GEN * test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n cand_all = pd.merge(cand1, cand2, how='outer', on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2\n cand_song_idx = list(cand_all.sort_values(by=['pred'],\n ascending=False)[:100]['index'])\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['songs']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(25), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = (test_song * test_title_genre * test_tim * test_GEN *\n test_ART)\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False]\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(40), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:i + \n 1])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:i + 1])[\n 1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:i + 1])[\n 1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, 
p.T)\n            test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n            test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n            test_ART = cosine_similarity(tra_ART, ART_test[i:i + 1])\n            test_title_genre = cosine_similarity(tra_title_gnr,\n                test_title_gnr[i:i + 1])\n            testi = (test_song * test_title_genre * test_tim * test_GEN *\n                test_ART)\n            cand_song = tra_song_sp_T.dot(testi)\n            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n            cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n                songs_already) == False]\n            cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n            cand_all = pd.merge(cand1, cand2, how='outer', on='index')\n            cand_all = cand_all.fillna(0)\n            cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y']) / 2\n            cand_song_idx = list(cand_all.sort_values(by=['pred'],\n                ascending=False)[:100]['index'])\n            cand_tag = tra_tag_sp_T.dot(testi)\n            cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n            cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n                False][:10]\n            rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n            res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n        elif len(data['tags_id']) != 0:\n            p = np.zeros((n_tags, 1))\n            p[data['tags_id']] = 1\n            tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]\n            row = np.repeat(range(25), tra_tag['num_songs'])\n            col = [song for songs in tra_tag['songs'] for song in songs]\n            dat = np.repeat(1, tra_tag['num_songs'].sum())\n            tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)\n                )\n            tra_song_sp_T = tra_song_sp.T.tocsr()\n            row = np.repeat(range(25), tra_tag['num_tags'])\n            col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n            dat = np.repeat(1, tra_tag['num_tags'].sum())\n            tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n            tra_tag_sp_T = tra_tag_sp.T.tocsr()\n            songs_already = data['songs']\n            tags_already = data['tags_id']\n            testi = cosine_similarity(tra_tag_sp, p.T)\n            if len(data['plylst_title']) != 0:\n                tra_title_gnr = title_tdm[model_knn_title25.kneighbors(\n                    title_ts[i:i + 1])[1][0]]\n                testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +\n                    1])\n                testi = testi * testi_title\n            cand_song = tra_song_sp_T.dot(testi)\n            cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n            cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n                songs_already) == False][:100]\n            cand_tag = tra_tag_sp_T.dot(testi)\n            cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n            cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n                False][:10]\n            rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n            res.append({'id': test.loc[pid, 'id'], 'songs': list(\n                cand_song_idx), 'tags': rec_tag_idx})\n        else:\n            cand_song = []\n            for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i\n                :i + 1])[1][0]].songs.to_list():\n                for j in li:\n                    cand_song.append(j)\n            cand_tag = []\n            for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i\n                :i + 1])[1][0]].tags.to_list():\n                for j in li:\n                    cand_tag.append(j)\n            cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[\n                :100].index)\n            rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10\n                ].index)\n            res.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n                'tags': rec_tag_idx})\n    for i in range(len(res)):\n        if len(res[i]['songs']) != 100:\n            print('song error at index {}'.format(i))\n        if len(res[i]['tags']) != 10:\n            print('tag error at index {}'.format(i))\n    rec = []\n    for i in range(len(res)):\n        rec.append({'id': res[i]['id'], 'songs': list(res[i]['songs']),\n            
'tags': res[i]['tags']})\n result1 = pd.DataFrame(rec)\n model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm=\n 'brute', n_neighbors=50, n_jobs=-1)\n model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute',\n n_neighbors=50, n_jobs=-1)\n model_knn_song.fit(song_sp)\n model_knn_tag.fit(tag_sp)\n model_knn_title.fit(title_sp)\n model_knn_title_gnr.fit(title_gnr)\n model_knn_times.fit(times_sp)\n model_knn_GEN.fit(GEN_sp)\n model_knn_ART.fit(ART_sp)\n res2 = []\n for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):\n data = test.iloc[i]\n pid = i\n if len(data['songs']) != 0 and len(data['tags_id']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n pp = np.zeros((n_tags, 1))\n pp[data['tags_id']] = 1\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]\n )[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n songs_already = data['songs']\n tags_already = data['tags_id']\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tag = cosine_similarity(tra_tag_sp, pp.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = test_song * test_tag * test_title_genre * test_GEN\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['songs']) != 0:\n p = np.zeros((707989, 1))\n p[data['songs']] = 1\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs'])\n col = [song for songs in tra_song['songs'] for song in songs]\n dat = np.repeat(1, tra_song['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = 
np.repeat(range(50), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n songs_already = data['songs']\n tags_already = data['tags_id']\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:i + 1]\n )[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:i + 1])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(\n test_title_gnr[i:i + 1])[1][0]]\n test_song = cosine_similarity(tra_song_sp, p.T)\n test_tim = cosine_similarity(tra_tim, tim_test[i:i + 1])\n test_GEN = cosine_similarity(tra_GEN, GEN_test[i:i + 1])\n test_title_genre = cosine_similarity(tra_title_gnr,\n test_title_gnr[i:i + 1])\n testi = test_song * test_title_genre * test_tim * test_GEN\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n elif len(data['tags_id']) != 0:\n p = np.zeros((n_tags, 1))\n p[data['tags_id']] = 1\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_songs'])\n col = [song for songs in tra_tag['songs'] for song in songs]\n dat = np.repeat(1, tra_tag['num_songs'].sum())\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)\n )\n tra_song_sp_T = tra_song_sp.T.tocsr()\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n songs_already = data['songs']\n tags_already = data['tags_id']\n testi = cosine_similarity(tra_tag_sp, pp.T)\n if len(data['plylst_title']) != 0:\n tra_title_gnr = title_tdm[model_knn_title.kneighbors(\n title_ts[i:i + 1])[1][0]]\n testi_title = cosine_similarity(tra_title_gnr, title_ts[i:i +\n 1])\n testi = testi * testi_title\n cand_song = tra_song_sp_T.dot(testi)\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1]\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx,\n songs_already) == False][:100]\n cand_tag = tra_tag_sp_T.dot(testi)\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) ==\n False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n else:\n cand_song = []\n for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +\n 1])[1][0]].songs.to_list():\n for j in li:\n cand_song.append(j)\n cand_tag = []\n for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:i +\n 1])[1][0]].tags.to_list():\n for j in li:\n cand_tag.append(j)\n cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[\n :100].index)\n rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10\n ].index)\n res2.append({'id': test.loc[pid, 'id'], 'songs': cand_song_idx,\n 'tags': rec_tag_idx})\n pd.DataFrame(res2)\n rec2 = []\n for i in range(len(res2)):\n 
rec2.append({'id': res2[i]['id'], 'songs': list(res2[i]['songs']),\n 'tags': res2[i]['tags']})\n result2 = pd.DataFrame(rec2)['songs']\n n_index = [10498, 6361, 1960, 8705, 9310]\n result2.index = n_index\n result1.loc[n_index, 'songs'] = result2\n result1['songs'].apply(len).sort_values()\n s = []\n for song in train.songs.tolist():\n s += song\n r1 = dict(Counter(s))\n r_song = sorted(r1.items(), key=lambda x: -x[1])\n r_song_top = r_song[:100]\n list_song = list(dict(r_song_top).keys())\n len(list_song)\n sub = []\n for j in range(len(result1)):\n sub.append(result1.loc[j].to_dict())\n sub[6361]['songs'] = list_song\n pd.DataFrame(sub)['songs'].apply(len).sort_values()\n write_json(sub, 'final_songs.json')\n return sub\n\n\nif __name__ == '__main__':\n _data = Dataset()\n pre_tag.run(_data.test, _data.n_songs, _data.n_tags, _data.spr_list,\n _data.tag_tid_id)\n final_tags = word2vec_for_tag.run(_data.total, _data.test)\n final_songs = song_inference()\n result = []\n for f_songs, f_tags in zip(final_songs, final_tags):\n result.append({'id': f_songs['id'], 'songs': f_songs['songs'],\n 'tags': f_tags['tags']})\n write_json(result, 'results.json')\n",
"step-5": "from datetime import timedelta, datetime\nimport glob\nimport json\nimport os\nimport re\nimport pickle\n\nimport os,time\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\nfrom sentencepiece import SentencePieceTrainer\nfrom sentencepiece import SentencePieceProcessor\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.sparse import vstack\nfrom scipy import sparse\nimport scipy.sparse as spr\nfrom scipy.sparse import vstack\nfrom scipy import sparse\nfrom util import write_json,makeSentencepieceModel\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom tqdm import tqdm_notebook\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom Dataset import Dataset\nimport pre_tag,word2vec_for_tag\n\ndef song_inference():\n sp_total_model_path = \"sp_total\"\n train = pd.read_json('./dataset/train.json', typ = 'frame',encoding='utf-8')\n song = pd.read_json('./dataset/song_meta.json', typ = 'frame',encoding='utf-8')\n plylst_tag = train['tags']\n tag_counter = Counter([tg for tgs in plylst_tag for tg in tgs])\n tag_dict = {x: tag_counter[x] for x in tag_counter}\n\n tag_id_tid = dict()\n tag_tid_id = dict()\n for i, t in enumerate(tag_dict):\n tag_id_tid[t] = i\n tag_tid_id[i] = t\n n_tags = len(tag_dict)\n\n plylst_song = train['songs']\n song_dict = {x: x for x in song['id']}\n\n n_songs = len(song_dict)\n\n train['tags_id'] = train['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])\n # song genre 내용 가져오기.\n song_cate = []\n\n for i in range(len(train)):\n gnr = []\n songs = train.iloc[i,3]\n\n for j in songs:\n for k in song.loc[j,'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n\n\n train['plylst_genre'] = song_cate\n\n plylst_genre = train['plylst_genre']\n genre_counter = Counter([gen for genre in plylst_genre for gen in genre])\n genre_dict = {x: genre_counter[x] for x in genre_counter}\n\n genre_id_tid = dict()\n genre_tid_id = dict()\n for i, t in enumerate(genre_dict):\n genre_id_tid[t] = i\n genre_tid_id[i] = t\n n_genre = len(genre_dict)\n train['plylst_genre_id'] = train['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n\n gnr_array = np.zeros((len(train),n_genre))\n for i,index in enumerate(train.index):\n if i%10000 == 0:\n print(i)\n counter = Counter(train.loc[index]['plylst_genre_id'])\n for (k,c) in counter.items():\n gnr_array[i][k] = c\n gnr_array.shape\n\n song['issue_date'] = song['issue_date'].astype('str').map(lambda x : x[:6])\n\n plylst_use = train[['plylst_title','updt_date','tags_id','songs']]\n plylst_use.loc[:,'num_songs'] = plylst_use['songs'].map(len)\n plylst_use.loc[:,'num_tags'] = plylst_use['tags_id'].map(len)\n\n plylst_train = plylst_use\n\n n_train = len(plylst_train)\n row = np.repeat(range(n_train), plylst_train['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in plylst_train['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, plylst_train['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n train_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_songs)) # csr_matrix 제작\n\n row = np.repeat(range(n_train), plylst_train['num_tags'])\n col = [tag for tags in plylst_train['tags_id'] for tag in tags]\n dat = np.repeat(1, plylst_train['num_tags'].sum())\n train_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_tags))\n\n train_user_songs_A_T = 
train_user_songs_A.T.tocsr()\n train_user_songs_A_T # 행에는 노래 columns에는 User 정보 삽입\n\n train_user_tags_A_T = train_user_tags_A.T.tocsr()\n train_user_tags_A_T # 행에는 Tangs columns에는 User 정보 삽입\n\n val = pd.read_json('./dataset/val.json', typ = 'frame',encoding='utf-8')\n\n song_cate = []\n\n for i in range(len(val)):\n gnr = []\n songs = val.iloc[i,3]\n\n for j in songs:\n for k in song.loc[j,'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n\n val['plylst_genre'] = song_cate\n\n val['tags_id'] = val['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])\n val['plylst_genre_id'] = val['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n val.loc[:,'num_songs'] = val['songs'].map(len)\n val.loc[:,'num_tags'] = val['tags_id'].map(len)\n # val_title = cv.transform(val['plylst_title']).toarray()\n\n gnr_val = np.zeros((len(val),n_genre))\n for i,index in enumerate(val.index):\n if i%10000 == 0:\n print(i)\n counter = Counter(val.loc[index]['plylst_genre_id'])\n for (k,c) in counter.items():\n gnr_val[i][k] = c\n gnr_val.shape\n\n n_val = len(val)\n row = np.repeat(range(n_val), val['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in val['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, val['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n val_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_songs)) # csr_matrix 제작\n\n row = np.repeat(range(n_val), val['num_tags'])\n col = [tag for tags in val['tags_id'] for tag in tags]\n dat = np.repeat(1, val['num_tags'].sum())\n val_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_val, n_tags))\n\n val_user_songs_A_T = val_user_songs_A.T.tocsr()\n val_user_tags_A_T = val_user_tags_A.T.tocsr()\n\n test = pd.read_json('./dataset/test.json', typ = 'frame',encoding='utf-8')\n\n song_cate = []\n\n for i in range(len(test)):\n gnr = []\n songs = test.iloc[i,3]\n\n for j in songs:\n for k in song.loc[j,'song_gn_dtl_gnr_basket']:\n gnr.append(k)\n song_cate.append(gnr)\n\n test['plylst_genre'] = song_cate\n\n test['tags_id'] = test['tags'].map(lambda x: [tag_id_tid.get(t) for t in x if tag_id_tid.get(t) != None])\n test['plylst_genre_id'] = test['plylst_genre'].map(lambda x: [genre_id_tid.get(s) for s in x if genre_id_tid.get(s) != None])\n test.loc[:,'num_songs'] = test['songs'].map(len)\n test.loc[:,'num_tags'] = test['tags_id'].map(len)\n # test_title = cv.transform(test['plylst_title']).toarray()\n\n gnr_test = np.zeros((len(test),n_genre))\n for i,index in enumerate(test.index):\n if i%10000 == 0:\n print(i)\n counter = Counter(test.loc[index]['plylst_genre_id'])\n for (k,c) in counter.items():\n gnr_test[i][k] = c\n gnr_test.shape\n\n n_test = len(test)\n row = np.repeat(range(n_test), test['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in test['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, test['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n test_user_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_songs)) # csr_matrix 제작\n\n row = np.repeat(range(n_test), test['num_tags'])\n col = [tag for tags in test['tags_id'] for tag in tags]\n dat = np.repeat(1, test['num_tags'].sum())\n test_user_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_test, n_tags))\n\n test_user_songs_A_T = test_user_songs_A.T.tocsr()\n test_user_tags_A_T = test_user_tags_A.T.tocsr()\n\n data_all = pd.concat([train,val,test])\n data_all.index = 
range(len(data_all))\n\n arts = song['artist_id_basket'].map(lambda x : x[0])\n\n arts = pd.DataFrame(arts)\n\n art_counts = arts['artist_id_basket'].value_counts().reset_index()\n art_counts.columns = ['artist_id_basket','counts']\n\n arts2 = pd.merge(arts,art_counts,how='left',on=['artist_id_basket'])\n\n song_art = song.iloc[arts2.query('counts >= 12')['artist_id_basket'].index]\n\n song_art = song_art[['artist_id_basket']]\n\n #아티스트 대분류\n ART_cate = []\n\n for i in tqdm_notebook(range(len(data_all))):\n ART = []\n songs = data_all.loc[i,'songs']\n\n for j in songs:\n if j in song_art.index :\n for k in song_art.loc[j,'artist_id_basket'] :\n ART.append(k)\n ART_cate.append(ART)\n\n\n data_all['plylst_ARTIST'] = ART_cate\n\n plylst_ARTIST = data_all['plylst_ARTIST']\n ARTIST_counter = Counter([ART for ARTIST in plylst_ARTIST for ART in ARTIST])\n ARTIST_dict = {x: ARTIST_counter[x] for x in ARTIST_counter}\n\n ARTIST_id_tid = dict()\n ARTIST_tid_id = dict()\n for i, t in enumerate(ARTIST_dict):\n ARTIST_id_tid[t] = i\n ARTIST_tid_id[i] = t\n n_ARTIST = len(ARTIST_dict)\n data_all['plylst_ARTIST_id'] = data_all['plylst_ARTIST'].map(lambda x: [ARTIST_id_tid.get(s) for s in x if ARTIST_id_tid.get(s) != None])\n\n ART_data_all = np.zeros((len(data_all),n_ARTIST))\n for i,index in enumerate(data_all.index):\n if i%10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_ARTIST_id'])\n for (k,c) in counter.items():\n ART_data_all[i][k] = c\n ART_data_all.shape\n\n ART_array = ART_data_all[:len(train)]\n ART_val = ART_data_all[len(train):len(train)+len(val)]\n ART_test = ART_data_all[len(train)+len(val):len(train)+len(val)+len(test)]\n\n\n # ART_data_all = sparse.csr_matrix(ART_data_all)\n del ART_data_all\n\n ART_array = sparse.csr_matrix(ART_array)\n ART_val = sparse.csr_matrix(ART_val)\n ART_test = sparse.csr_matrix(ART_test)\n\n # song tim 내용 가져오기.\n tim_cate = []\n\n for i in tqdm_notebook(range(len(data_all))):\n tim = []\n songs = data_all.loc[i,'songs']\n\n for j in songs:\n tim.append(song.loc[j,'issue_date'])\n tim_cate.append(tim)\n\n\n data_all['plylst_times'] = tim_cate\n\n plylst_times = data_all['plylst_times']\n times_counter = Counter([tim for times in plylst_times for tim in times])\n times_dict = {x: times_counter[x] for x in times_counter}\n\n times_id_tid = dict()\n times_tid_id = dict()\n for i, t in enumerate(times_dict):\n times_id_tid[t] = i\n times_tid_id[i] = t\n n_times = len(times_dict)\n data_all['plylst_times_id'] = data_all['plylst_times'].map(lambda x: [times_id_tid.get(s) for s in x if times_id_tid.get(s) != None])\n\n tim_data_all = np.zeros((len(data_all),n_times))\n for i,index in enumerate(data_all.index):\n if i%10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_times_id'])\n for (k,c) in counter.items():\n tim_data_all[i][k] = c\n\n tim_array = tim_data_all[:len(train)]\n tim_val = tim_data_all[len(train):len(train)+len(val)]\n tim_test = tim_data_all[len(train)+len(val):len(train)+len(val)+len(test)]\n\n # tim_data_all = sparse.csr_matrix(tim_data_all)\n del tim_data_all\n\n tim_array = sparse.csr_matrix(tim_array)\n tim_val = sparse.csr_matrix(tim_val)\n tim_test = sparse.csr_matrix(tim_test)\n\n #장르 대분류\n GEN_cate = []\n\n for i in tqdm_notebook(range(len(data_all))):\n GEN = []\n songs = data_all.loc[i,'songs']\n\n for j in songs:\n for k in song.loc[j,'song_gn_gnr_basket'] :\n GEN.append(k)\n GEN_cate.append(GEN)\n\n\n data_all['plylst_GENRE'] = GEN_cate\n\n plylst_GENRE = data_all['plylst_GENRE']\n GENRE_counter = 
Counter([GEN for GENRE in plylst_GENRE for GEN in GENRE])\n GENRE_dict = {x: GENRE_counter[x] for x in GENRE_counter}\n\n GENRE_id_tid = dict()\n GENRE_tid_id = dict()\n for i, t in enumerate(GENRE_dict):\n GENRE_id_tid[t] = i\n GENRE_tid_id[i] = t\n n_GENRE = len(GENRE_dict)\n data_all['plylst_GENRE_id'] = data_all['plylst_GENRE'].map(lambda x: [GENRE_id_tid.get(s) for s in x if GENRE_id_tid.get(s) != None])\n\n GEN_data_all = np.zeros((len(data_all),n_GENRE))\n for i,index in enumerate(data_all.index):\n if i%10000 == 0:\n print(i)\n counter = Counter(data_all.loc[index]['plylst_GENRE_id'])\n for (k,c) in counter.items():\n GEN_data_all[i][k] = c\n\n GEN_array = GEN_data_all[:len(train)]\n GEN_val = GEN_data_all[len(train):len(train)+len(val)]\n GEN_test = GEN_data_all[len(train)+len(val):len(train)+len(val)+len(test)]\n # GEN_data_all = sparse.csr_matrix(GEN_data_all)\n del GEN_data_all\n\n GEN_array = sparse.csr_matrix(GEN_array)\n GEN_val = sparse.csr_matrix(GEN_val)\n GEN_test = sparse.csr_matrix(GEN_test)\n\n content = data_all['plylst_title']\n if \"{}.model\".format(sp_total_model_path) not in os.listdir():\n makeSentencepieceModel(data_all,sp_total_model_path)\n sp = SentencePieceProcessor()\n sp.Load(\"{}.model\".format(sp_total_model_path))\n\n cv = CountVectorizer(max_features=3000, tokenizer=sp.encode_as_pieces)\n content = data_all['plylst_title']\n tdm = cv.fit_transform(content)\n\n title_tdm = tdm.toarray()\n\n title_tr = title_tdm[:len(train)]\n title_va = title_tdm[len(train):len(train)+len(val)]\n title_ts = title_tdm[len(train)+len(val):len(train)+len(val)+len(test)]\n\n title_gnr = np.concatenate((gnr_array,title_tr),axis=1)\n val_title_gnr = np.concatenate((gnr_val,title_va),axis=1)\n test_title_gnr = np.concatenate((gnr_test,title_ts),axis=1)\n\n title_sp = sparse.csr_matrix(title_tdm)\n\n title_gnr = sparse.csr_matrix(title_gnr)\n val_title_gnr = sparse.csr_matrix(val_title_gnr)\n test_title_gnr = sparse.csr_matrix(test_title_gnr)\n\n title_gnr = vstack([title_gnr,val_title_gnr,test_title_gnr])\n song_sp = vstack([train_user_songs_A,val_user_songs_A,test_user_songs_A])\n tag_sp = vstack([train_user_tags_A,val_user_tags_A,test_user_tags_A])\n times_sp = vstack([tim_array,tim_val,tim_test])\n GEN_sp = vstack([GEN_array,GEN_val,GEN_test])\n\n\n ART_sp = vstack([ART_array,ART_val,ART_test])\n\n # song_sp_T = song_sp.T.tocsr()\n # tag_sp_T = tag_sp.T.tocsr()\n\n\n model_knn_song25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n model_knn_tag25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n model_knn_title25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n model_knn_title_gnr25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n model_knn_times25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n model_knn_GEN25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n model_knn_ART25 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=25, n_jobs=-1)\n\n model_knn_song40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n model_knn_tag40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n model_knn_title40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n model_knn_title_gnr40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n 
model_knn_times40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n model_knn_GEN40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n model_knn_ART40 = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=40, n_jobs=-1)\n\n\n\n model_knn_song25.fit(song_sp)\n model_knn_tag25.fit(tag_sp)\n model_knn_title25.fit(title_sp)\n model_knn_title_gnr25.fit(title_gnr)\n model_knn_times25.fit(times_sp)\n model_knn_GEN25.fit(GEN_sp)\n model_knn_ART25.fit(ART_sp)\n\n model_knn_song40.fit(song_sp)\n model_knn_tag40.fit(tag_sp)\n model_knn_title40.fit(title_sp)\n model_knn_title_gnr40.fit(title_gnr)\n model_knn_times40.fit(times_sp)\n model_knn_GEN40.fit(GEN_sp)\n model_knn_ART40.fit(ART_sp)\n\n\n\n\n\n\n train.loc[:,'num_songs'] = train['songs'].map(len)\n train.loc[:,'num_tags'] = train['tags_id'].map(len)\n\n data_all = pd.concat([train,val,test])\n\n data_all.index = range(len(data_all))\n\n\n res = []\n for i in tqdm_notebook(range(len(test))):\n data = test.iloc[i]\n pid = i\n\n if len(data['songs']) >= 2 and len(data['tags_id']) >=2 :\n p = np.zeros((707989,1))\n p[data['songs']] = 1\n\n pp = np.zeros((n_tags,1))\n pp[data['tags_id']] = 1\n\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_song['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_song['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(25), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:(i+1)])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:(i+1)])[1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:(i+1)])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(test_title_gnr[i:(i+1)])[1][0]]\n\n songs_already = data[\"songs\"]\n tags_already = data[\"tags_id\"]\n\n test_song = cosine_similarity(tra_song_sp,p.T)\n test_tag = cosine_similarity(tra_tag_sp,pp.T)\n\n test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])\n test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])\n test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])\n test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])\n\n testi = test_song * test_tag * test_title_genre * test_tim * test_GEN * test_ART\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # 중복제거\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n ####### 40 ####################################################\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_song['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_song['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 
1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n tra_tag = data_all.iloc[model_knn_tag40.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(40), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:(i+1)])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:(i+1)])[1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:(i+1)])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(test_title_gnr[i:(i+1)])[1][0]]\n\n test_song = cosine_similarity(tra_song_sp,p.T)\n test_tag = cosine_similarity(tra_tag_sp,pp.T)\n\n test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])\n test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])\n test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])\n test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])\n\n testi = test_song * test_tag * test_title_genre * test_tim * test_GEN * test_ART\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # 중복제거\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n\n cand_all = pd.merge(cand1,cand2,how='outer',on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y'])/2\n cand_song_idx = list(cand_all.sort_values(by=['pred'],ascending=False)[:100]['index'])\n\n ######tag######\n cand_tag = tra_tag_sp_T.dot(testi) # 똑같은 작업 실시\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n\n res.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n\n elif len(data['songs']) != 0:\n p = np.zeros((707989,1))\n p[data['songs']] = 1\n\n tra_song = data_all.iloc[model_knn_song25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_song['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_song['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n # tra_tag = data_all.iloc[model_knn_tag25.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(25), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n tra_tim = times_sp[model_knn_times25.kneighbors(tim_test[i:(i+1)])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN25.kneighbors(GEN_test[i:(i+1)])[1][0]]\n tra_ART = ART_sp[model_knn_ART25.kneighbors(ART_test[i:(i+1)])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr25.kneighbors(test_title_gnr[i:(i+1)])[1][0]]\n\n songs_already = data[\"songs\"]\n tags_already = data[\"tags_id\"]\n\n test_song = cosine_similarity(tra_song_sp,p.T)\n test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])\n 
test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])\n test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])\n test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])\n\n testi = test_song*test_title_genre*test_tim*test_GEN * test_ART\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # 중복제거\n cand1 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n ####### 40 ####################################################\n tra_song = data_all.iloc[model_knn_song40.kneighbors(p.T)[1][0]]\n row = np.repeat(range(40), tra_song['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_song['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_song['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n row = np.repeat(range(40), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(40, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n tra_tim = times_sp[model_knn_times40.kneighbors(tim_test[i:(i+1)])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN40.kneighbors(GEN_test[i:(i+1)])[1][0]]\n tra_ART = ART_sp[model_knn_ART40.kneighbors(ART_test[i:(i+1)])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr40.kneighbors(test_title_gnr[i:(i+1)])[1][0]]\n\n test_song = cosine_similarity(tra_song_sp,p.T)\n test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])\n test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])\n test_ART = cosine_similarity(tra_ART,ART_test[i:(i+1)])\n test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])\n\n testi = test_song * test_title_genre * test_tim * test_GEN * test_ART\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False] # 중복제거\n cand2 = pd.DataFrame(cand_song).iloc[cand_song_idx].reset_index()\n\n cand_all = pd.merge(cand1,cand2,how='outer',on='index')\n cand_all = cand_all.fillna(0)\n cand_all['pred'] = (cand_all['0_x'] + cand_all['0_y'])/2\n cand_song_idx = list(cand_all.sort_values(by=['pred'],ascending=False)[:100]['index'])\n\n #######tag########\n cand_tag = tra_tag_sp_T.dot(testi) # 똑같은 작업 실시\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n\n res.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n\n elif len(data['tags_id']) !=0:\n p = np.zeros((n_tags,1))\n p[data['tags_id']] = 1\n\n tra_tag = data_all.iloc[model_knn_tag25.kneighbors(p.T)[1][0]]\n row = np.repeat(range(25), tra_tag['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_tag['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_tag['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_songs)) # csr_matrix 제작\n tra_song_sp_T = 
tra_song_sp.T.tocsr()\n\n row = np.repeat(range(25), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(25, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n\n songs_already = data[\"songs\"]\n tags_already = data[\"tags_id\"]\n\n testi = cosine_similarity(tra_tag_sp,pp.T)\n\n if len(data['plylst_title']) != 0 :\n tra_title_gnr = title_tdm[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]]\n testi_title = cosine_similarity(tra_title_gnr,title_ts[i:(i+1)])\n testi = testi * testi_title\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # 중복되는 노래 있는지 확인하고 100개 추출\n\n cand_tag = tra_tag_sp_T.dot(testi) # 똑같은 작업 실시\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n\n res.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": list(cand_song_idx),\n \"tags\": rec_tag_idx\n })\n\n else :\n cand_song = []\n for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]].songs.to_list():\n for j in li:\n cand_song.append(j)\n\n cand_tag = []\n for li in data_all.iloc[model_knn_title25.kneighbors(title_ts[i:(i+1)])[1][0]].tags.to_list():\n for j in li:\n cand_tag.append(j)\n\n cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[:100].index)\n rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10].index)\n\n res.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n for i in range(len(res)):\n if len(res[i]['songs']) != 100:\n print('song 에서 {}번째 오류 발생'.format(i))\n\n if len(res[i]['tags']) != 10:\n print('tag 에서 {}번째 오류 발생'.format(i))\n\n rec = []\n for i in range(len(res)):\n rec.append({\n \"id\": res[i]['id'],\n \"songs\": list(res[i]['songs']),\n \"tags\": res[i]['tags']\n })\n\n result1 = pd.DataFrame(rec)\n\n model_knn_song = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n model_knn_tag = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n model_knn_title = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n model_knn_title_gnr = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n model_knn_times = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n model_knn_GEN = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n model_knn_ART = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=50, n_jobs=-1)\n\n model_knn_song.fit(song_sp)\n model_knn_tag.fit(tag_sp)\n model_knn_title.fit(title_sp)\n model_knn_title_gnr.fit(title_gnr)\n model_knn_times.fit(times_sp)\n model_knn_GEN.fit(GEN_sp)\n model_knn_ART.fit(ART_sp)\n\n res2 = []\n for i in tqdm_notebook([1960, 6361, 8705, 9310, 10498]):\n data = test.iloc[i]\n pid = i\n\n if len(data['songs']) != 0 and len(data['tags_id']) != 0:\n p = np.zeros((707989,1))\n p[data['songs']] = 1\n\n pp = np.zeros((n_tags,1))\n pp[data['tags_id']] = 1\n\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = 
[song for songs in tra_song['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_song['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(pp.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:(i+1)])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:(i+1)])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(test_title_gnr[i:(i+1)])[1][0]]\n\n songs_already = data[\"songs\"]\n tags_already = data[\"tags_id\"]\n\n test_song = cosine_similarity(tra_song_sp,p.T)\n test_tag = cosine_similarity(tra_tag_sp,pp.T)\n\n test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])\n test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])\n test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])\n\n testi = test_song * test_tag * test_title_genre * test_GEN\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # 중복되는 노래 있는지 확인하고 100개 추출\n\n cand_tag = tra_tag_sp_T.dot(testi) # 똑같은 작업 실시\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n\n res2.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n\n elif len(data['songs']) != 0:\n p = np.zeros((707989,1))\n p[data['songs']] = 1\n\n tra_song = data_all.iloc[model_knn_song.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_song['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_song['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_song['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n row = np.repeat(range(50), tra_song['num_tags'])\n col = [tag for tags in tra_song['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_song['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n songs_already = data[\"songs\"]\n tags_already = data[\"tags_id\"]\n\n tra_tim = times_sp[model_knn_times.kneighbors(tim_test[i:(i+1)])[1][0]]\n tra_GEN = GEN_sp[model_knn_GEN.kneighbors(GEN_test[i:(i+1)])[1][0]]\n tra_title_gnr = title_gnr[model_knn_title_gnr.kneighbors(test_title_gnr[i:(i+1)])[1][0]]\n\n test_song = cosine_similarity(tra_song_sp,p.T)\n\n test_tim = cosine_similarity(tra_tim,tim_test[i:(i+1)])\n test_GEN = cosine_similarity(tra_GEN,GEN_test[i:(i+1)])\n test_title_genre = cosine_similarity(tra_title_gnr,test_title_gnr[i:(i+1)])\n testi = test_song*test_title_genre*test_tim*test_GEN\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-200:][::-1] # 값이 높은 상위 120개 노래 추출\n\n 
cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # 중복되는 노래 있는지 확인하고 100개 추출\n\n cand_tag = tra_tag_sp_T.dot(testi) # 똑같은 작업 실시\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n\n res2.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n elif len(data['tags_id']) !=0:\n p = np.zeros((n_tags,1))\n p[data['tags_id']] = 1\n\n tra_tag = data_all.iloc[model_knn_tag.kneighbors(p.T)[1][0]]\n row = np.repeat(range(50), tra_tag['num_songs']) # User Index 별 노래 개수만큼 만듦\n col = [song for songs in tra_tag['songs'] for song in songs] # Song dic number 추출\n dat = np.repeat(1, tra_tag['num_songs'].sum()) # User별 Song이 있는 부분에 1을 넣기위해 1과 전체 노래 개수만큼 만듦\n tra_song_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_songs)) # csr_matrix 제작\n tra_song_sp_T = tra_song_sp.T.tocsr()\n\n row = np.repeat(range(50), tra_tag['num_tags'])\n col = [tag for tags in tra_tag['tags_id'] for tag in tags]\n dat = np.repeat(1, tra_tag['num_tags'].sum())\n tra_tag_sp = spr.csr_matrix((dat, (row, col)), shape=(50, n_tags))\n tra_tag_sp_T = tra_tag_sp.T.tocsr()\n\n\n songs_already = data[\"songs\"]\n tags_already = data[\"tags_id\"]\n\n testi = cosine_similarity(tra_tag_sp,pp.T)\n\n if len(data['plylst_title']) != 0 :\n tra_title_gnr = title_tdm[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]]\n testi_title = cosine_similarity(tra_title_gnr,title_ts[i:(i+1)])\n testi = testi * testi_title\n\n cand_song = tra_song_sp_T.dot(testi) # 행에는 노래 열에는 유저 정보 %*% 유사한 유저 -> 유사한 노래에 대하여 높은 값 나옴\n cand_song_idx = cand_song.reshape(-1).argsort()[-300:][::-1] # 값이 높은 상위 120개 노래 추출\n\n cand_song_idx = cand_song_idx[np.isin(cand_song_idx, songs_already) == False][:100] # 중복되는 노래 있는지 확인하고 100개 추출\n\n cand_tag = tra_tag_sp_T.dot(testi) # 똑같은 작업 실시\n cand_tag_idx = cand_tag.reshape(-1).argsort()[-30:][::-1]\n\n cand_tag_idx = cand_tag_idx[np.isin(cand_tag_idx, tags_already) == False][:10]\n rec_tag_idx = [tag_tid_id[i] for i in cand_tag_idx]\n\n res2.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n else:\n cand_song = []\n for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]].songs.to_list():\n for j in li:\n cand_song.append(j)\n\n cand_tag = []\n for li in data_all.iloc[model_knn_title.kneighbors(title_ts[i:(i+1)])[1][0]].tags.to_list():\n for j in li:\n cand_tag.append(j)\n\n cand_song_idx = list(pd.DataFrame(cand_song)[0].value_counts()[:100].index)\n rec_tag_idx = list(pd.DataFrame(cand_tag)[0].value_counts()[:10].index)\n\n res2.append({\n \"id\": test.loc[pid,'id'],\n \"songs\": cand_song_idx,\n \"tags\": rec_tag_idx\n })\n\n\n pd.DataFrame(res2)\n\n rec2 = []\n for i in range(len(res2)):\n rec2.append({\n \"id\": res2[i]['id'],\n \"songs\": list(res2[i]['songs']),\n \"tags\": res2[i]['tags']\n })\n\n result2 = pd.DataFrame(rec2)['songs']\n\n n_index = [10498,6361,1960,8705,9310]\n\n result2.index = n_index\n\n result1.loc[n_index,'songs'] = result2\n\n result1['songs'].apply(len).sort_values()\n #그럼에도 채워지지 않은 6361에 대해서 상위 100곡 추천\n s = []\n for song in train.songs.tolist():\n s += song\n r1 = dict(Counter(s))\n\n r_song = sorted(r1.items(), key=lambda x: -x[1])\n r_song_top = r_song[:100] # 몇 곡 할지도 정해야 함\n\n list_song = list(dict(r_song_top).keys())\n len(list_song)\n\n sub= []\n for j in range(len(result1)) :\n 
sub.append(result1.loc[j].to_dict())\n\n sub[6361]['songs'] = list_song\n\n pd.DataFrame(sub)['songs'].apply(len).sort_values()\n write_json(sub,'final_songs.json')\n return sub\n\nif __name__ == '__main__':\n\n _data = Dataset()\n\n pre_tag.run(_data.test,_data.n_songs,_data.n_tags,_data.spr_list,_data.tag_tid_id)\n final_tags = word2vec_for_tag.run(_data.total,_data.test)\n\n final_songs = song_inference()\n result = []\n for f_songs, f_tags in zip(final_songs,final_tags):\n result.append({\n 'id':f_songs['id'],\n 'songs':f_songs['songs'],\n 'tags':f_tags['tags']\n })\n write_json(result, 'results.json')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
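The step strings in the record above repeatedly build playlist-by-item incidence matrices with np.repeat plus scipy.sparse.csr_matrix. A minimal standalone sketch of that pattern, using made-up playlist data for illustration:

import numpy as np
import scipy.sparse as spr

# hypothetical playlists: each inner list holds the song ids of one playlist
playlists = [[0, 3, 5], [1, 3], [2, 4, 5, 6]]
n_songs = 7
num_songs = [len(p) for p in playlists]

row = np.repeat(range(len(playlists)), num_songs)      # playlist index, once per contained song
col = [song for songs in playlists for song in songs]  # flattened song ids
dat = np.repeat(1, sum(num_songs))                     # a 1 marks membership

user_songs = spr.csr_matrix((dat, (row, col)), shape=(len(playlists), n_songs))
print(user_songs.toarray())  # rows = playlists, columns = songs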
<|reserved_special_token_0|>
def __main__():
email = input("Please input the departing user's email address: ")
ad_result = deprovision_AD(email)
print(ad_result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def deprovision_AD(email):
memberOf_list = []
ad_info = get_secret('TS/Active-Directory-Offboarding-Info')
ad_info_dict = json.loads(ad_info)
tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.
PROTOCOL_TLS)
server = Server(FQDN, use_ssl=True, tls=tls_configuration)
conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[
'sa_password'], auto_bind=True, raise_exceptions=True)
search_filter = '(&(objectClass=user)(mail={}))'.format(email)
entry_generator = conn.extend.standard.paged_search(search_base=
search_base, search_filter=search_filter, search_scope=SUBTREE,
attributes=['memberOf'], paged_size=5, generator=True)
for entry in entry_generator:
dn = entry['dn']
relative_dn = dn.split(',')[0]
groups = entry['raw_attributes']['memberOf']
for group in groups:
group_str = str(group)
memberOf_list.append(group_str[2:-1])
try:
for group in memberOf_list:
conn.extend.microsoft.remove_members_from_groups(dn, group)
conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')
conn.modify(dn, changes={'userAccountControl': (2, '514')})
conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=
'<dn of new OU>')
conn.unbind()
return 'Success'
except NameError:
return (
'A user with that email address does not exist inside Active Directory'
)
def __main__():
email = input("Please input the departing user's email address: ")
ad_result = deprovision_AD(email)
print(ad_result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SSL_CERT_PATH = 'path/to/cert.pem'
FQDN = 'ad.example.com'
search_base = 'OU=Sites,DC=ad,DC=example,DC=com'
def deprovision_AD(email):
memberOf_list = []
ad_info = get_secret('TS/Active-Directory-Offboarding-Info')
ad_info_dict = json.loads(ad_info)
tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.
PROTOCOL_TLS)
server = Server(FQDN, use_ssl=True, tls=tls_configuration)
conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[
'sa_password'], auto_bind=True, raise_exceptions=True)
search_filter = '(&(objectClass=user)(mail={}))'.format(email)
entry_generator = conn.extend.standard.paged_search(search_base=
search_base, search_filter=search_filter, search_scope=SUBTREE,
attributes=['memberOf'], paged_size=5, generator=True)
for entry in entry_generator:
dn = entry['dn']
relative_dn = dn.split(',')[0]
groups = entry['raw_attributes']['memberOf']
for group in groups:
group_str = str(group)
memberOf_list.append(group_str[2:-1])
try:
for group in memberOf_list:
conn.extend.microsoft.remove_members_from_groups(dn, group)
conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')
conn.modify(dn, changes={'userAccountControl': (2, '514')})
conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=
'<dn of new OU>')
conn.unbind()
return 'Success'
except NameError:
return (
'A user with that email address does not exist inside Active Directory'
)
def __main__():
email = input("Please input the departing user's email address: ")
ad_result = deprovision_AD(email)
print(ad_result)
<|reserved_special_token_1|>
import json
import os
import ssl
from ldap3 import Server, Connection, Tls, SUBTREE, ALL
SSL_CERT_PATH = 'path/to/cert.pem'
FQDN = 'ad.example.com'
search_base = 'OU=Sites,DC=ad,DC=example,DC=com'
def deprovision_AD(email):
memberOf_list = []
ad_info = get_secret('TS/Active-Directory-Offboarding-Info')
ad_info_dict = json.loads(ad_info)
tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.
PROTOCOL_TLS)
server = Server(FQDN, use_ssl=True, tls=tls_configuration)
conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[
'sa_password'], auto_bind=True, raise_exceptions=True)
search_filter = '(&(objectClass=user)(mail={}))'.format(email)
entry_generator = conn.extend.standard.paged_search(search_base=
search_base, search_filter=search_filter, search_scope=SUBTREE,
attributes=['memberOf'], paged_size=5, generator=True)
for entry in entry_generator:
dn = entry['dn']
relative_dn = dn.split(',')[0]
groups = entry['raw_attributes']['memberOf']
for group in groups:
group_str = str(group)
memberOf_list.append(group_str[2:-1])
try:
for group in memberOf_list:
conn.extend.microsoft.remove_members_from_groups(dn, group)
conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')
conn.modify(dn, changes={'userAccountControl': (2, '514')})
conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=
'<dn of new OU>')
conn.unbind()
return 'Success'
except NameError:
return (
'A user with that email address does not exist inside Active Directory'
)
def __main__():
email = input("Please input the departing user's email address: ")
ad_result = deprovision_AD(email)
print(ad_result)
<|reserved_special_token_1|>
import json
import os
import ssl
from ldap3 import Server, Connection, Tls, SUBTREE, ALL
# Include root CA certificate path if you use a self signed AD certificate
SSL_CERT_PATH = "path/to/cert.pem"
# Include the FQDN of your Domain Controller here
FQDN = "ad.example.com"
# Search base is the CN of the container where your users live
search_base='OU=Sites,DC=ad,DC=example,DC=com'
def deprovision_AD(email):
memberOf_list = []
ad_info = get_secret("TS/Active-Directory-Offboarding-Info")
    # TODO: Get the info from the secret above and turn it into env variables instead
ad_info_dict = json.loads(ad_info)
# Binding to AD with latest form of TLS available
tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.PROTOCOL_TLS)
server = Server(FQDN, use_ssl=True, tls=tls_configuration)
conn = Connection(server, ad_info_dict["sa_username_dn"], ad_info_dict["sa_password"], auto_bind=True,
raise_exceptions=True)
# Find user in AD based off of 'mail' attribute
search_filter = "(&(objectClass=user)(mail={}))".format(email)
entry_generator = conn.extend.standard.paged_search(search_base=search_base,
search_filter=search_filter,
search_scope=SUBTREE,
attributes=['memberOf'],
paged_size=5,
generator=True)
for entry in entry_generator:
dn = entry['dn']
relative_dn = dn.split(',')[0]
groups = entry['raw_attributes']['memberOf']
for group in groups:
group_str = str(group)
memberOf_list.append(group_str[2:-1])
# There is a comment before each offboarding task. Comment out the ones you'd like to skip
try:
# Loop through groups and remove user from those groups
for group in memberOf_list:
conn.extend.microsoft.remove_members_from_groups(dn, group)
# Add user to security group
conn.extend.microsoft.add_members_to_groups(dn, "<dn of new group>")
# Disable account
conn.modify(dn, changes={'userAccountControl': (2, '514')})
# Move to different OU
conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior="<dn of new OU>")
# Delete account
## TODO: Figure out the command to delete the AD account
# Close connection
conn.unbind()
return "Success"
except NameError:
return "A user with that email address does not exist inside Active Directory"
def __main__():
# TODO: Figure out how to populate this as an env
email = input("Please input the departing user's email address: ")
ad_result = deprovision_AD(email)
print(ad_result)
|
flexible
|
{
"blob_id": "9ca5c052db43c1d8b0cafa18038b3ebcd80067f7",
"index": 4710,
"step-1": "<mask token>\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-2": "<mask token>\n\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret('TS/Active-Directory-Offboarding-Info')\n ad_info_dict = json.loads(ad_info)\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.\n PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[\n 'sa_password'], auto_bind=True, raise_exceptions=True)\n search_filter = '(&(objectClass=user)(mail={}))'.format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=\n search_base, search_filter=search_filter, search_scope=SUBTREE,\n attributes=['memberOf'], paged_size=5, generator=True)\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n try:\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\n '<dn of new OU>')\n conn.unbind()\n return 'Success'\n except NameError:\n return (\n 'A user with that email address does not exist inside Active Directory'\n )\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-3": "<mask token>\nSSL_CERT_PATH = 'path/to/cert.pem'\nFQDN = 'ad.example.com'\nsearch_base = 'OU=Sites,DC=ad,DC=example,DC=com'\n\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret('TS/Active-Directory-Offboarding-Info')\n ad_info_dict = json.loads(ad_info)\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.\n PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[\n 'sa_password'], auto_bind=True, raise_exceptions=True)\n search_filter = '(&(objectClass=user)(mail={}))'.format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=\n search_base, search_filter=search_filter, search_scope=SUBTREE,\n attributes=['memberOf'], paged_size=5, generator=True)\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n try:\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\n '<dn of new OU>')\n conn.unbind()\n return 'Success'\n except NameError:\n return (\n 'A user with that email address does not exist inside Active Directory'\n )\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-4": "import json\nimport os\nimport ssl\nfrom ldap3 import Server, Connection, Tls, SUBTREE, ALL\nSSL_CERT_PATH = 'path/to/cert.pem'\nFQDN = 'ad.example.com'\nsearch_base = 'OU=Sites,DC=ad,DC=example,DC=com'\n\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret('TS/Active-Directory-Offboarding-Info')\n ad_info_dict = json.loads(ad_info)\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.\n PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n conn = Connection(server, ad_info_dict['sa_username_dn'], ad_info_dict[\n 'sa_password'], auto_bind=True, raise_exceptions=True)\n search_filter = '(&(objectClass=user)(mail={}))'.format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=\n search_base, search_filter=search_filter, search_scope=SUBTREE,\n attributes=['memberOf'], paged_size=5, generator=True)\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n try:\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n conn.extend.microsoft.add_members_to_groups(dn, '<dn of new group>')\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\n '<dn of new OU>')\n conn.unbind()\n return 'Success'\n except NameError:\n return (\n 'A user with that email address does not exist inside Active Directory'\n )\n\n\ndef __main__():\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n",
"step-5": "import json\nimport os\n\nimport ssl\n\nfrom ldap3 import Server, Connection, Tls, SUBTREE, ALL\n\n# Include root CA certificate path if you use a self signed AD certificate\nSSL_CERT_PATH = \"path/to/cert.pem\"\n\n# Include the FQDN of your Domain Controller here\nFQDN = \"ad.example.com\"\n\n# Search base is the CN of the container where your users live\nsearch_base='OU=Sites,DC=ad,DC=example,DC=com'\n\ndef deprovision_AD(email):\n memberOf_list = []\n ad_info = get_secret(\"TS/Active-Directory-Offboarding-Info\")\n # TODO: Get the into from the secret above and turn into env variables instead\n ad_info_dict = json.loads(ad_info)\n\n # Binding to AD with latest form of TLS available\n tls_configuration = Tls(ca_certs_file=SSL_CERT_PATH, version=ssl.PROTOCOL_TLS)\n server = Server(FQDN, use_ssl=True, tls=tls_configuration)\n\n conn = Connection(server, ad_info_dict[\"sa_username_dn\"], ad_info_dict[\"sa_password\"], auto_bind=True,\n raise_exceptions=True)\n\n # Find user in AD based off of 'mail' attribute\n search_filter = \"(&(objectClass=user)(mail={}))\".format(email)\n entry_generator = conn.extend.standard.paged_search(search_base=search_base,\n search_filter=search_filter,\n search_scope=SUBTREE,\n attributes=['memberOf'],\n paged_size=5,\n generator=True)\n\n for entry in entry_generator:\n dn = entry['dn']\n relative_dn = dn.split(',')[0]\n groups = entry['raw_attributes']['memberOf']\n for group in groups:\n group_str = str(group)\n memberOf_list.append(group_str[2:-1])\n\n # There is a comment before each offboarding task. Comment out the ones you'd like to skip\n\n try:\n # Loop through groups and remove user from those groups\n for group in memberOf_list:\n conn.extend.microsoft.remove_members_from_groups(dn, group)\n\n # Add user to security group\n conn.extend.microsoft.add_members_to_groups(dn, \"<dn of new group>\")\n\n # Disable account\n conn.modify(dn, changes={'userAccountControl': (2, '514')})\n\n # Move to different OU\n conn.modify_dn(dn=dn, relative_dn=relative_dn, new_superior=\"<dn of new OU>\")\n\n # Delete account\n ## TODO: Figure out the command to delete the AD account\n\n # Close connection\n conn.unbind()\n return \"Success\"\n\n except NameError:\n return \"A user with that email address does not exist inside Active Directory\"\n\n\ndef __main__():\n # TODO: Figure out how to populate this as an env\n email = input(\"Please input the departing user's email address: \")\n ad_result = deprovision_AD(email)\n print(ad_result)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class _DatabaseResourceTableController:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,
resource_name: str, resource_path: str, resource_hash: str) ->None:
"""
Register 'peer x resource' relationship at database
:param peer_id: Peer's id
:param peer_ip: Peer's ip
:param peer_port: Peer's listen port
:param resource_name: Resource's name
:param resource_path: Resource's path
:param resource_hash: Resource's MD5
"""
session = self.session()
try:
new_resource = ResourceTable()
new_resource.peerId = peer_id
new_resource.peerIp = peer_ip
new_resource.peerPort = peer_port
new_resource.resourceName = resource_name
new_resource.resourcePath = resource_path
new_resource.resourceHash = resource_hash
session.add(new_resource)
session.commit()
finally:
session.close()
def get_available_peer(self, resource_name: str) ->typing.List:
"""
Get peer's ip and port and resource's path, name and hash
that contains same resource name
:param resource_name: Name of the resource to be searched at database
:return: List containing matching peer's and resource's info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash).filter(
ResourceTable.resourceName == resource_name).group_by(
ResourceTable.peerId).all()
if available_peers:
return available_peers[0]
else:
return []
finally:
session.close()
def get_all_resources(self) ->typing.List:
"""
Get every register of peer's ip and port and resource's path, name and hash
:return: List of every 'peer x resource' info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash
).group_by(ResourceTable.peerId, ResourceTable.resourceHash
).all()
return available_peers
finally:
session.close()
def drop_peer(self, peer_id: str) ->None:
"""
Delete every record that contains same peer's id
:param peer_id: Peer's ip to be used as filter
"""
session = self.session()
try:
session.query(ResourceTable).filter(ResourceTable.peerId == peer_id
).delete()
session.commit()
finally:
session.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _DatabaseResourceTableController:
<|reserved_special_token_0|>
def __init__(self):
self.engine = sqlalchemy.create_engine('sqlite:///db.sqlite3')
self.session = sessionmaker(bind=self.engine)
def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,
resource_name: str, resource_path: str, resource_hash: str) ->None:
"""
Register 'peer x resource' relationship at database
:param peer_id: Peer's id
:param peer_ip: Peer's ip
:param peer_port: Peer's listen port
:param resource_name: Resource's name
:param resource_path: Resource's path
:param resource_hash: Resource's MD5
"""
session = self.session()
try:
new_resource = ResourceTable()
new_resource.peerId = peer_id
new_resource.peerIp = peer_ip
new_resource.peerPort = peer_port
new_resource.resourceName = resource_name
new_resource.resourcePath = resource_path
new_resource.resourceHash = resource_hash
session.add(new_resource)
session.commit()
finally:
session.close()
def get_available_peer(self, resource_name: str) ->typing.List:
"""
Get peer's ip and port and resource's path, name and hash
that contains same resource name
:param resource_name: Name of the resource to be searched at database
:return: List containing matching peer's and resource's info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash).filter(
ResourceTable.resourceName == resource_name).group_by(
ResourceTable.peerId).all()
if available_peers:
return available_peers[0]
else:
return []
finally:
session.close()
def get_all_resources(self) ->typing.List:
"""
Get every register of peer's ip and port and resource's path, name and hash
:return: List of every 'peer x resource' info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash
).group_by(ResourceTable.peerId, ResourceTable.resourceHash
).all()
return available_peers
finally:
session.close()
def drop_peer(self, peer_id: str) ->None:
"""
Delete every record that contains same peer's id
:param peer_id: Peer's ip to be used as filter
"""
session = self.session()
try:
session.query(ResourceTable).filter(ResourceTable.peerId == peer_id
).delete()
session.commit()
finally:
session.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _DatabaseResourceTableController:
"""
Controller for resource table access
"""
def __init__(self):
self.engine = sqlalchemy.create_engine('sqlite:///db.sqlite3')
self.session = sessionmaker(bind=self.engine)
def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,
resource_name: str, resource_path: str, resource_hash: str) ->None:
"""
Register 'peer x resource' relationship at database
:param peer_id: Peer's id
:param peer_ip: Peer's ip
:param peer_port: Peer's listen port
:param resource_name: Resource's name
:param resource_path: Resource's path
:param resource_hash: Resource's MD5
"""
session = self.session()
try:
new_resource = ResourceTable()
new_resource.peerId = peer_id
new_resource.peerIp = peer_ip
new_resource.peerPort = peer_port
new_resource.resourceName = resource_name
new_resource.resourcePath = resource_path
new_resource.resourceHash = resource_hash
session.add(new_resource)
session.commit()
finally:
session.close()
def get_available_peer(self, resource_name: str) ->typing.List:
"""
Get peer's ip and port and resource's path, name and hash
that contains same resource name
:param resource_name: Name of the resource to be searched at database
:return: List containing matching peer's and resource's info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash).filter(
ResourceTable.resourceName == resource_name).group_by(
ResourceTable.peerId).all()
if available_peers:
return available_peers[0]
else:
return []
finally:
session.close()
def get_all_resources(self) ->typing.List:
"""
Get every register of peer's ip and port and resource's path, name and hash
:return: List of every 'peer x resource' info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash
).group_by(ResourceTable.peerId, ResourceTable.resourceHash
).all()
return available_peers
finally:
session.close()
def drop_peer(self, peer_id: str) ->None:
"""
Delete every record that contains same peer's id
:param peer_id: Peer's ip to be used as filter
"""
session = self.session()
try:
session.query(ResourceTable).filter(ResourceTable.peerId == peer_id
).delete()
session.commit()
finally:
session.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__authors__ = ['Gabriel Castro', 'Gustavo Possebon', 'Henrique Kops']
__date__ = '24/10/2020'
class _DatabaseResourceTableController:
"""
Controller for resource table access
"""
def __init__(self):
self.engine = sqlalchemy.create_engine('sqlite:///db.sqlite3')
self.session = sessionmaker(bind=self.engine)
def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,
resource_name: str, resource_path: str, resource_hash: str) ->None:
"""
Register 'peer x resource' relationship at database
:param peer_id: Peer's id
:param peer_ip: Peer's ip
:param peer_port: Peer's listen port
:param resource_name: Resource's name
:param resource_path: Resource's path
:param resource_hash: Resource's MD5
"""
session = self.session()
try:
new_resource = ResourceTable()
new_resource.peerId = peer_id
new_resource.peerIp = peer_ip
new_resource.peerPort = peer_port
new_resource.resourceName = resource_name
new_resource.resourcePath = resource_path
new_resource.resourceHash = resource_hash
session.add(new_resource)
session.commit()
finally:
session.close()
def get_available_peer(self, resource_name: str) ->typing.List:
"""
Get peer's ip and port and resource's path, name and hash
that contains same resource name
:param resource_name: Name of the resource to be searched at database
:return: List containing matching peer's and resource's info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash).filter(
ResourceTable.resourceName == resource_name).group_by(
ResourceTable.peerId).all()
if available_peers:
return available_peers[0]
else:
return []
finally:
session.close()
def get_all_resources(self) ->typing.List:
"""
Get every register of peer's ip and port and resource's path, name and hash
:return: List of every 'peer x resource' info
"""
session = self.session()
try:
available_peers = session.query(ResourceTable.peerIp,
ResourceTable.peerPort, ResourceTable.resourcePath,
ResourceTable.resourceName, ResourceTable.resourceHash
).group_by(ResourceTable.peerId, ResourceTable.resourceHash
).all()
return available_peers
finally:
session.close()
def drop_peer(self, peer_id: str) ->None:
"""
Delete every record that contains same peer's id
:param peer_id: Peer's ip to be used as filter
"""
session = self.session()
try:
session.query(ResourceTable).filter(ResourceTable.peerId == peer_id
).delete()
session.commit()
finally:
session.close()
@functools.lru_cache()
def get_database_resource_table_controller() ->_DatabaseResourceTableController:
"""
Singleton for DatabaseResourceTableController class
:return: Same instance for DatabaseResourceTableController class
"""
return _DatabaseResourceTableController()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module that defines a controller for database's operations over business rules
"""
# built-in dependencies
import functools
import typing
# external dependencies
import sqlalchemy
from sqlalchemy.orm import sessionmaker
# project dependencies
from database.table import ResourceTable
__authors__ = ["Gabriel Castro", "Gustavo Possebon", "Henrique Kops"]
__date__ = "24/10/2020"
class _DatabaseResourceTableController:
"""
Controller for resource table access
"""
def __init__(self):
# sqlalchemy
self.engine = sqlalchemy.create_engine("sqlite:///db.sqlite3")
self.session = sessionmaker(bind=self.engine)
def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,
resource_name: str, resource_path: str, resource_hash: str) -> None:
"""
Register 'peer x resource' relationship at database
:param peer_id: Peer's id
:param peer_ip: Peer's ip
:param peer_port: Peer's listen port
:param resource_name: Resource's name
:param resource_path: Resource's path
:param resource_hash: Resource's MD5
"""
session = self.session()
try:
new_resource = ResourceTable()
new_resource.peerId = peer_id
new_resource.peerIp = peer_ip
new_resource.peerPort = peer_port
new_resource.resourceName = resource_name
new_resource.resourcePath = resource_path
new_resource.resourceHash = resource_hash
session.add(new_resource)
session.commit()
finally:
session.close()
def get_available_peer(self, resource_name: str) -> typing.List:
"""
Get peer's ip and port and resource's path, name and hash
that contains same resource name
:param resource_name: Name of the resource to be searched at database
:return: List containing matching peer's and resource's info
"""
session = self.session()
try:
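            # group_by(peerId) yields one row per peer; only the first match
            # is returned, as a single (ip, port, path, name, hash) tuple.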
available_peers = session\
.query(
ResourceTable.peerIp,
ResourceTable.peerPort,
ResourceTable.resourcePath,
ResourceTable.resourceName,
ResourceTable.resourceHash
)\
.filter(ResourceTable.resourceName == resource_name)\
.group_by(ResourceTable.peerId)\
.all()
if available_peers:
return available_peers[0]
else:
return []
finally:
session.close()
def get_all_resources(self) -> typing.List:
"""
Get every register of peer's ip and port and resource's path, name and hash
:return: List of every 'peer x resource' info
"""
session = self.session()
try:
available_peers = session\
.query(
ResourceTable.peerIp,
ResourceTable.peerPort,
ResourceTable.resourcePath,
ResourceTable.resourceName,
ResourceTable.resourceHash
)\
.group_by(ResourceTable.peerId, ResourceTable.resourceHash)\
.all()
return available_peers
finally:
session.close()
def drop_peer(self, peer_id: str) -> None:
"""
Delete every record that contains same peer's id
:param peer_id: Peer's ip to be used as filter
"""
session = self.session()
try:
session\
.query(ResourceTable)\
.filter(ResourceTable.peerId == peer_id)\
.delete()
session.commit()
finally:
session.close()
@functools.lru_cache()
def get_database_resource_table_controller() -> _DatabaseResourceTableController:
"""
Singleton for DatabaseResourceTableController class
:return: Same instance for DatabaseResourceTableController class
"""
return _DatabaseResourceTableController()
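

# Illustrative usage sketch (not part of the original module; assumes the
# ResourceTable model from database.table and a local db.sqlite3 exist, and
# every value below is made up for demonstration):
if __name__ == "__main__":
    controller = get_database_resource_table_controller()
    controller.register_peer(
        peer_id="peer-1", peer_ip="127.0.0.1", peer_port=5000,
        resource_name="file.txt", resource_path="/tmp/file.txt",
        resource_hash="d41d8cd98f00b204e9800998ecf8427e")
    print(controller.get_available_peer("file.txt"))
    print(controller.get_all_resources())
    controller.drop_peer("peer-1")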
|
flexible
|
{
"blob_id": "c024e12fe06e47187c25a9f384ceed566bf94645",
"index": 6909,
"step-1": "<mask token>\n\n\nclass _DatabaseResourceTableController:\n <mask token>\n <mask token>\n\n def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,\n resource_name: str, resource_path: str, resource_hash: str) ->None:\n \"\"\"\n Register 'peer x resource' relationship at database\n\n :param peer_id: Peer's id\n :param peer_ip: Peer's ip\n :param peer_port: Peer's listen port\n :param resource_name: Resource's name\n :param resource_path: Resource's path\n :param resource_hash: Resource's MD5\n \"\"\"\n session = self.session()\n try:\n new_resource = ResourceTable()\n new_resource.peerId = peer_id\n new_resource.peerIp = peer_ip\n new_resource.peerPort = peer_port\n new_resource.resourceName = resource_name\n new_resource.resourcePath = resource_path\n new_resource.resourceHash = resource_hash\n session.add(new_resource)\n session.commit()\n finally:\n session.close()\n\n def get_available_peer(self, resource_name: str) ->typing.List:\n \"\"\"\n Get peer's ip and port and resource's path, name and hash\n that contains same resource name\n\n :param resource_name: Name of the resource to be searched at database\n :return: List containing matching peer's and resource's info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash).filter(\n ResourceTable.resourceName == resource_name).group_by(\n ResourceTable.peerId).all()\n if available_peers:\n return available_peers[0]\n else:\n return []\n finally:\n session.close()\n\n def get_all_resources(self) ->typing.List:\n \"\"\"\n Get every register of peer's ip and port and resource's path, name and hash\n\n :return: List of every 'peer x resource' info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash\n ).group_by(ResourceTable.peerId, ResourceTable.resourceHash\n ).all()\n return available_peers\n finally:\n session.close()\n\n def drop_peer(self, peer_id: str) ->None:\n \"\"\"\n Delete every record that contains same peer's id\n\n :param peer_id: Peer's ip to be used as filter\n \"\"\"\n session = self.session()\n try:\n session.query(ResourceTable).filter(ResourceTable.peerId == peer_id\n ).delete()\n session.commit()\n finally:\n session.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _DatabaseResourceTableController:\n <mask token>\n\n def __init__(self):\n self.engine = sqlalchemy.create_engine('sqlite:///db.sqlite3')\n self.session = sessionmaker(bind=self.engine)\n\n def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,\n resource_name: str, resource_path: str, resource_hash: str) ->None:\n \"\"\"\n Register 'peer x resource' relationship at database\n\n :param peer_id: Peer's id\n :param peer_ip: Peer's ip\n :param peer_port: Peer's listen port\n :param resource_name: Resource's name\n :param resource_path: Resource's path\n :param resource_hash: Resource's MD5\n \"\"\"\n session = self.session()\n try:\n new_resource = ResourceTable()\n new_resource.peerId = peer_id\n new_resource.peerIp = peer_ip\n new_resource.peerPort = peer_port\n new_resource.resourceName = resource_name\n new_resource.resourcePath = resource_path\n new_resource.resourceHash = resource_hash\n session.add(new_resource)\n session.commit()\n finally:\n session.close()\n\n def get_available_peer(self, resource_name: str) ->typing.List:\n \"\"\"\n Get peer's ip and port and resource's path, name and hash\n that contains same resource name\n\n :param resource_name: Name of the resource to be searched at database\n :return: List containing matching peer's and resource's info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash).filter(\n ResourceTable.resourceName == resource_name).group_by(\n ResourceTable.peerId).all()\n if available_peers:\n return available_peers[0]\n else:\n return []\n finally:\n session.close()\n\n def get_all_resources(self) ->typing.List:\n \"\"\"\n Get every register of peer's ip and port and resource's path, name and hash\n\n :return: List of every 'peer x resource' info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash\n ).group_by(ResourceTable.peerId, ResourceTable.resourceHash\n ).all()\n return available_peers\n finally:\n session.close()\n\n def drop_peer(self, peer_id: str) ->None:\n \"\"\"\n Delete every record that contains same peer's id\n\n :param peer_id: Peer's ip to be used as filter\n \"\"\"\n session = self.session()\n try:\n session.query(ResourceTable).filter(ResourceTable.peerId == peer_id\n ).delete()\n session.commit()\n finally:\n session.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass _DatabaseResourceTableController:\n \"\"\"\n Controller for resource table access\n \"\"\"\n\n def __init__(self):\n self.engine = sqlalchemy.create_engine('sqlite:///db.sqlite3')\n self.session = sessionmaker(bind=self.engine)\n\n def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,\n resource_name: str, resource_path: str, resource_hash: str) ->None:\n \"\"\"\n Register 'peer x resource' relationship at database\n\n :param peer_id: Peer's id\n :param peer_ip: Peer's ip\n :param peer_port: Peer's listen port\n :param resource_name: Resource's name\n :param resource_path: Resource's path\n :param resource_hash: Resource's MD5\n \"\"\"\n session = self.session()\n try:\n new_resource = ResourceTable()\n new_resource.peerId = peer_id\n new_resource.peerIp = peer_ip\n new_resource.peerPort = peer_port\n new_resource.resourceName = resource_name\n new_resource.resourcePath = resource_path\n new_resource.resourceHash = resource_hash\n session.add(new_resource)\n session.commit()\n finally:\n session.close()\n\n def get_available_peer(self, resource_name: str) ->typing.List:\n \"\"\"\n Get peer's ip and port and resource's path, name and hash\n that contains same resource name\n\n :param resource_name: Name of the resource to be searched at database\n :return: List containing matching peer's and resource's info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash).filter(\n ResourceTable.resourceName == resource_name).group_by(\n ResourceTable.peerId).all()\n if available_peers:\n return available_peers[0]\n else:\n return []\n finally:\n session.close()\n\n def get_all_resources(self) ->typing.List:\n \"\"\"\n Get every register of peer's ip and port and resource's path, name and hash\n\n :return: List of every 'peer x resource' info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash\n ).group_by(ResourceTable.peerId, ResourceTable.resourceHash\n ).all()\n return available_peers\n finally:\n session.close()\n\n def drop_peer(self, peer_id: str) ->None:\n \"\"\"\n Delete every record that contains same peer's id\n\n :param peer_id: Peer's ip to be used as filter\n \"\"\"\n session = self.session()\n try:\n session.query(ResourceTable).filter(ResourceTable.peerId == peer_id\n ).delete()\n session.commit()\n finally:\n session.close()\n\n\n<mask token>\n",
"step-4": "<mask token>\n__authors__ = ['Gabriel Castro', 'Gustavo Possebon', 'Henrique Kops']\n__date__ = '24/10/2020'\n\n\nclass _DatabaseResourceTableController:\n \"\"\"\n Controller for resource table access\n \"\"\"\n\n def __init__(self):\n self.engine = sqlalchemy.create_engine('sqlite:///db.sqlite3')\n self.session = sessionmaker(bind=self.engine)\n\n def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,\n resource_name: str, resource_path: str, resource_hash: str) ->None:\n \"\"\"\n Register 'peer x resource' relationship at database\n\n :param peer_id: Peer's id\n :param peer_ip: Peer's ip\n :param peer_port: Peer's listen port\n :param resource_name: Resource's name\n :param resource_path: Resource's path\n :param resource_hash: Resource's MD5\n \"\"\"\n session = self.session()\n try:\n new_resource = ResourceTable()\n new_resource.peerId = peer_id\n new_resource.peerIp = peer_ip\n new_resource.peerPort = peer_port\n new_resource.resourceName = resource_name\n new_resource.resourcePath = resource_path\n new_resource.resourceHash = resource_hash\n session.add(new_resource)\n session.commit()\n finally:\n session.close()\n\n def get_available_peer(self, resource_name: str) ->typing.List:\n \"\"\"\n Get peer's ip and port and resource's path, name and hash\n that contains same resource name\n\n :param resource_name: Name of the resource to be searched at database\n :return: List containing matching peer's and resource's info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash).filter(\n ResourceTable.resourceName == resource_name).group_by(\n ResourceTable.peerId).all()\n if available_peers:\n return available_peers[0]\n else:\n return []\n finally:\n session.close()\n\n def get_all_resources(self) ->typing.List:\n \"\"\"\n Get every register of peer's ip and port and resource's path, name and hash\n\n :return: List of every 'peer x resource' info\n \"\"\"\n session = self.session()\n try:\n available_peers = session.query(ResourceTable.peerIp,\n ResourceTable.peerPort, ResourceTable.resourcePath,\n ResourceTable.resourceName, ResourceTable.resourceHash\n ).group_by(ResourceTable.peerId, ResourceTable.resourceHash\n ).all()\n return available_peers\n finally:\n session.close()\n\n def drop_peer(self, peer_id: str) ->None:\n \"\"\"\n Delete every record that contains same peer's id\n\n :param peer_id: Peer's ip to be used as filter\n \"\"\"\n session = self.session()\n try:\n session.query(ResourceTable).filter(ResourceTable.peerId == peer_id\n ).delete()\n session.commit()\n finally:\n session.close()\n\n\n@functools.lru_cache()\ndef get_database_resource_table_controller() ->[\n _DatabaseResourceTableController]:\n \"\"\"\n Singleton for DatabaseResourceTableController class\n\n :return: Same instance for DatabaseResourceTableController class\n \"\"\"\n return _DatabaseResourceTableController()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that defines a controller for database's operations over business rules\n\"\"\"\n\n# built-in dependencies\nimport functools\nimport typing\n\n# external dependencies\nimport sqlalchemy\nfrom sqlalchemy.orm import sessionmaker\n\n# project dependencies\nfrom database.table import ResourceTable\n\n__authors__ = [\"Gabriel Castro\", \"Gustavo Possebon\", \"Henrique Kops\"]\n__date__ = \"24/10/2020\"\n\n\nclass _DatabaseResourceTableController:\n \"\"\"\n Controller for resource table access\n \"\"\"\n\n def __init__(self):\n # sqlalchemy\n self.engine = sqlalchemy.create_engine(\"sqlite:///db.sqlite3\")\n self.session = sessionmaker(bind=self.engine)\n\n def register_peer(self, peer_id: str, peer_ip: str, peer_port: int,\n resource_name: str, resource_path: str, resource_hash: str) -> None:\n \"\"\"\n Register 'peer x resource' relationship at database\n\n :param peer_id: Peer's id\n :param peer_ip: Peer's ip\n :param peer_port: Peer's listen port\n :param resource_name: Resource's name\n :param resource_path: Resource's path\n :param resource_hash: Resource's MD5\n \"\"\"\n\n session = self.session()\n\n try:\n new_resource = ResourceTable()\n\n new_resource.peerId = peer_id\n new_resource.peerIp = peer_ip\n new_resource.peerPort = peer_port\n new_resource.resourceName = resource_name\n new_resource.resourcePath = resource_path\n new_resource.resourceHash = resource_hash\n\n session.add(new_resource)\n session.commit()\n\n finally:\n session.close()\n\n def get_available_peer(self, resource_name: str) -> typing.List:\n \"\"\"\n Get peer's ip and port and resource's path, name and hash\n that contains same resource name\n\n :param resource_name: Name of the resource to be searched at database\n :return: List containing matching peer's and resource's info\n \"\"\"\n\n session = self.session()\n\n try:\n available_peers = session\\\n .query(\n ResourceTable.peerIp,\n ResourceTable.peerPort,\n ResourceTable.resourcePath,\n ResourceTable.resourceName,\n ResourceTable.resourceHash\n )\\\n .filter(ResourceTable.resourceName == resource_name)\\\n .group_by(ResourceTable.peerId)\\\n .all()\n\n if available_peers:\n return available_peers[0]\n\n else:\n return []\n\n finally:\n session.close()\n\n def get_all_resources(self) -> typing.List:\n \"\"\"\n Get every register of peer's ip and port and resource's path, name and hash\n\n :return: List of every 'peer x resource' info\n \"\"\"\n\n session = self.session()\n\n try:\n available_peers = session\\\n .query(\n ResourceTable.peerIp,\n ResourceTable.peerPort,\n ResourceTable.resourcePath,\n ResourceTable.resourceName,\n ResourceTable.resourceHash\n )\\\n .group_by(ResourceTable.peerId, ResourceTable.resourceHash)\\\n .all()\n\n return available_peers\n\n finally:\n session.close()\n\n def drop_peer(self, peer_id: str) -> None:\n \"\"\"\n Delete every record that contains same peer's id\n\n :param peer_id: Peer's ip to be used as filter\n \"\"\"\n\n session = self.session()\n try:\n session\\\n .query(ResourceTable)\\\n .filter(ResourceTable.peerId == peer_id)\\\n .delete()\n session.commit()\n\n finally:\n session.close()\n\n\n@functools.lru_cache()\ndef get_database_resource_table_controller() -> [_DatabaseResourceTableController]:\n \"\"\"\n Singleton for DatabaseResourceTableController class\n\n :return: Same instance for DatabaseResourceTableController class\n \"\"\"\n\n return _DatabaseResourceTableController()\n",
"step-ids": [
5,
6,
7,
9,
11
]
}
|
[
5,
6,
7,
9,
11
] |
<|reserved_special_token_0|>
class UBDDPGAgent(Agent):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def compile(self, optimizer, metrics=[]):
metrics += [mean_q]
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
raise ValueError(
'More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'
)
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(
metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
def clipped_error(y_true, y_pred):
y_true = K.squeeze(y_true, axis=-1)
y_pred = K.squeeze(y_pred, axis=-1)
loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
return loss
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects
)
self.target_critic.compile(optimizer='sgd', loss='mse')
self.actor.compile(optimizer='sgd', loss='mse')
if self.target_model_update < 1.0:
critic_updates = get_soft_target_model_updates(self.
target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,
critic_updates)
self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error
] * self.nb_players, metrics=critic_metrics)
critic_inputs = []
actor_inputs = []
for i in self.critic.input:
if i in self.critic_action_inputs:
critic_inputs.append([])
else:
critic_inputs.append(i)
actor_inputs.append(i)
actor_outputs = self.actor(actor_inputs)
if not isinstance(actor_outputs, (list,)):
actor_outputs = [actor_outputs]
assert len(actor_outputs) == self.nb_players
for input_idx, actor_output in zip(self.critic_action_input_idxes,
actor_outputs):
critic_inputs[input_idx] = actor_output
critic_outputs = self.critic(critic_inputs)
if not isinstance(critic_outputs, (list,)):
critic_outputs = [critic_outputs]
assert len(critic_outputs) == self.nb_players
actor_losses = [None] * self.nb_players
for input_idx, critic_output in zip(self.critic_action_input_idxes,
critic_outputs):
actor_losses[input_idx] = -K.mean(critic_output)
updates = actor_optimizer.get_updates(params=self.actor.
trainable_weights, loss=actor_losses)
if self.target_model_update < 1.0:
updates += get_soft_target_model_updates(self.target_actor,
self.actor, self.target_model_update)
updates += self.actor.updates
if K.backend() == 'tensorflow':
self.actor_train_fn = K.function(actor_inputs + [K.
learning_phase()], actor_outputs, updates=updates)
else:
if self.uses_learning_phase:
actor_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(actor_inputs, actor_outputs,
updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
batch = self.process_state_batch([state])
actions = self.actor.predict_on_batch(batch)
if self.nb_players == 1:
actions = [actions]
assert len(actions) == self.nb_players
assert actions[0].shape == (1, self.nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state_batch_with_action = batch[:]
else:
state_batch_with_action = [batch]
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state_batch_with_action.insert(input_idx, actions[action_idx])
q_values = [qv.flatten() for qv in self.critic.predict_on_batch(
state_batch_with_action)]
assert q_values[0].shape == (1,)
assert len(q_values) == self.nb_players
action_best = actions[np.argmax(q_values)].flatten()
assert action_best.shape == (self.nb_actions,)
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action_best.shape
action_best += noise
return action_best
<|reserved_special_token_0|>
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action,
reward, terminal, training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
return metrics
can_train_either = (self.step > self.nb_steps_warmup_critic or self
.step > self.nb_steps_warmup_actor)
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0.0 if e.terminal1 else 1.0)
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(
state1_batch)
if not isinstance(target_actions, (list,)):
target_actions = [target_actions]
assert len(target_actions) == self.nb_players
assert target_actions[0].shape == (self.batch_size, self.
nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
for action_idx, input_idx in enumerate(self.
critic_action_input_idxes):
state1_batch_with_action.insert(input_idx,
target_actions[action_idx])
target_q_values = self.target_critic.predict_on_batch(
state1_batch_with_action)
if not isinstance(target_q_values, (list,)):
target_q_values = [target_q_values]
target_q_values = [tqv.flatten() for tqv in target_q_values]
assert target_q_values[0].shape == reward_batch.shape
assert len(target_q_values) == self.nb_players
discounted_reward_batch = [(self.gamma * terminal1_batch *
tqv) for tqv in target_q_values]
assert discounted_reward_batch[0].shape == reward_batch.shape
targets = [(reward_batch + drb) for drb in
discounted_reward_batch]
assert targets[0].shape == reward_batch.shape
assert len(targets) == self.nb_players
if len(self.critic.inputs) > self.nb_players + 1:
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
for input_idx in self.critic_action_input_idxes:
state0_batch_with_action.insert(input_idx, action_batch)
metrics = self.critic.train_on_batch(state0_batch_with_action,
targets)
if self.processor is not None:
metrics += self.processor.metrics
if self.step > self.nb_steps_warmup_actor:
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)
assert len(action_values) == self.nb_players
assert action_values[0].shape == (self.batch_size, self.
nb_actions)
if (self.target_model_update >= 1 and self.step % self.
target_model_update == 0):
self.update_target_models_hard()
return metrics
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UBDDPGAgent(Agent):
<|reserved_special_token_0|>
def __init__(self, nb_actions, actor, critic, nb_players,
critic_action_inputs, memory, gamma=0.99, batch_size=32,
nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=
np.inf, random_process=None, custom_model_objects={},
target_model_update=0.001, **kwargs):
assert len(critic_action_inputs) == nb_players
if hasattr(actor.output, '__len__') and len(actor.output
) != nb_players:
            raise ValueError(
                'Actor "{}" does not have the right number of outputs. '
                'DDPG expects an actor that has {} outputs.'
                .format(actor, nb_players))
for critic_action_input in critic_action_inputs:
if critic_action_input not in critic.input:
raise ValueError(
'Critic "{}" does not have designated action input "{}".'
.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError(
'Critic "{}" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'
.format(critic))
super(UBDDPGAgent, self).__init__(**kwargs)
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
target_model_update = int(target_model_update)
else:
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn(
"`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we're falling back to `delta_range[1] = {}`"
.format(delta_range[1]))
delta_clip = delta_range[1]
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
self.actor = actor
self.critic = critic
self.nb_players = nb_players
self.critic_action_inputs = critic_action_inputs
self.critic_action_input_idxes = [self.critic.input.index(
critic_action_input) for critic_action_input in
critic_action_inputs]
self.memory = memory
self.compiled = False
self.reset_states()
<|reserved_special_token_0|>
def compile(self, optimizer, metrics=[]):
metrics += [mean_q]
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
raise ValueError(
'More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'
)
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(
metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
def clipped_error(y_true, y_pred):
y_true = K.squeeze(y_true, axis=-1)
y_pred = K.squeeze(y_pred, axis=-1)
loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
return loss
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects
)
self.target_critic.compile(optimizer='sgd', loss='mse')
self.actor.compile(optimizer='sgd', loss='mse')
if self.target_model_update < 1.0:
critic_updates = get_soft_target_model_updates(self.
target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,
critic_updates)
self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error
] * self.nb_players, metrics=critic_metrics)
critic_inputs = []
actor_inputs = []
for i in self.critic.input:
if i in self.critic_action_inputs:
critic_inputs.append([])
else:
critic_inputs.append(i)
actor_inputs.append(i)
actor_outputs = self.actor(actor_inputs)
if not isinstance(actor_outputs, (list,)):
actor_outputs = [actor_outputs]
assert len(actor_outputs) == self.nb_players
for input_idx, actor_output in zip(self.critic_action_input_idxes,
actor_outputs):
critic_inputs[input_idx] = actor_output
critic_outputs = self.critic(critic_inputs)
if not isinstance(critic_outputs, (list,)):
critic_outputs = [critic_outputs]
assert len(critic_outputs) == self.nb_players
actor_losses = [None] * self.nb_players
for input_idx, critic_output in zip(self.critic_action_input_idxes,
critic_outputs):
actor_losses[input_idx] = -K.mean(critic_output)
updates = actor_optimizer.get_updates(params=self.actor.
trainable_weights, loss=actor_losses)
if self.target_model_update < 1.0:
updates += get_soft_target_model_updates(self.target_actor,
self.actor, self.target_model_update)
updates += self.actor.updates
if K.backend() == 'tensorflow':
self.actor_train_fn = K.function(actor_inputs + [K.
learning_phase()], actor_outputs, updates=updates)
else:
if self.uses_learning_phase:
actor_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(actor_inputs, actor_outputs,
updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
<|reserved_special_token_0|>
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
batch = self.process_state_batch([state])
actions = self.actor.predict_on_batch(batch)
if self.nb_players == 1:
actions = [actions]
assert len(actions) == self.nb_players
assert actions[0].shape == (1, self.nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state_batch_with_action = batch[:]
else:
state_batch_with_action = [batch]
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state_batch_with_action.insert(input_idx, actions[action_idx])
q_values = [qv.flatten() for qv in self.critic.predict_on_batch(
state_batch_with_action)]
assert q_values[0].shape == (1,)
assert len(q_values) == self.nb_players
action_best = actions[np.argmax(q_values)].flatten()
assert action_best.shape == (self.nb_actions,)
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action_best.shape
action_best += noise
return action_best
def forward(self, observation):
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action,
reward, terminal, training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
return metrics
can_train_either = (self.step > self.nb_steps_warmup_critic or self
.step > self.nb_steps_warmup_actor)
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0.0 if e.terminal1 else 1.0)
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(
state1_batch)
if not isinstance(target_actions, (list,)):
target_actions = [target_actions]
assert len(target_actions) == self.nb_players
assert target_actions[0].shape == (self.batch_size, self.
nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
for action_idx, input_idx in enumerate(self.
critic_action_input_idxes):
state1_batch_with_action.insert(input_idx,
target_actions[action_idx])
target_q_values = self.target_critic.predict_on_batch(
state1_batch_with_action)
if not isinstance(target_q_values, (list,)):
target_q_values = [target_q_values]
target_q_values = [tqv.flatten() for tqv in target_q_values]
assert target_q_values[0].shape == reward_batch.shape
assert len(target_q_values) == self.nb_players
discounted_reward_batch = [(self.gamma * terminal1_batch *
tqv) for tqv in target_q_values]
assert discounted_reward_batch[0].shape == reward_batch.shape
targets = [(reward_batch + drb) for drb in
discounted_reward_batch]
assert targets[0].shape == reward_batch.shape
assert len(targets) == self.nb_players
if len(self.critic.inputs) > self.nb_players + 1:
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
for input_idx in self.critic_action_input_idxes:
state0_batch_with_action.insert(input_idx, action_batch)
metrics = self.critic.train_on_batch(state0_batch_with_action,
targets)
if self.processor is not None:
metrics += self.processor.metrics
if self.step > self.nb_steps_warmup_actor:
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)
assert len(action_values) == self.nb_players
assert action_values[0].shape == (self.batch_size, self.
nb_actions)
if (self.target_model_update >= 1 and self.step % self.
target_model_update == 0):
self.update_target_models_hard()
return metrics
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UBDDPGAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, actor, critic, nb_players,
critic_action_inputs, memory, gamma=0.99, batch_size=32,
nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=
np.inf, random_process=None, custom_model_objects={},
target_model_update=0.001, **kwargs):
assert len(critic_action_inputs) == nb_players
if hasattr(actor.output, '__len__') and len(actor.output
) != nb_players:
            raise ValueError(
                'Actor "{}" does not have the right number of outputs. '
                'DDPG expects an actor that has {} outputs.'
                .format(actor, nb_players))
for critic_action_input in critic_action_inputs:
if critic_action_input not in critic.input:
raise ValueError(
'Critic "{}" does not have designated action input "{}".'
.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError(
'Critic "{}" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'
.format(critic))
super(UBDDPGAgent, self).__init__(**kwargs)
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
target_model_update = int(target_model_update)
else:
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn(
"`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we're falling back to `delta_range[1] = {}`"
.format(delta_range[1]))
delta_clip = delta_range[1]
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
self.actor = actor
self.critic = critic
self.nb_players = nb_players
self.critic_action_inputs = critic_action_inputs
self.critic_action_input_idxes = [self.critic.input.index(
critic_action_input) for critic_action_input in
critic_action_inputs]
self.memory = memory
self.compiled = False
self.reset_states()
@property
def uses_learning_phase(self):
return (self.actor.uses_learning_phase or self.critic.
uses_learning_phase)
def compile(self, optimizer, metrics=[]):
metrics += [mean_q]
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
raise ValueError(
'More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'
)
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(
metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
def clipped_error(y_true, y_pred):
y_true = K.squeeze(y_true, axis=-1)
y_pred = K.squeeze(y_pred, axis=-1)
loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
return loss
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects
)
self.target_critic.compile(optimizer='sgd', loss='mse')
self.actor.compile(optimizer='sgd', loss='mse')
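        # Soft target updates: theta_target <- tau * theta + (1 - tau) * theta_target,
        # folded into the critic optimizer so they run on every critic train step.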
if self.target_model_update < 1.0:
critic_updates = get_soft_target_model_updates(self.
target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,
critic_updates)
self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error
] * self.nb_players, metrics=critic_metrics)
critic_inputs = []
actor_inputs = []
for i in self.critic.input:
if i in self.critic_action_inputs:
critic_inputs.append([])
else:
critic_inputs.append(i)
actor_inputs.append(i)
actor_outputs = self.actor(actor_inputs)
if not isinstance(actor_outputs, (list,)):
actor_outputs = [actor_outputs]
assert len(actor_outputs) == self.nb_players
for input_idx, actor_output in zip(self.critic_action_input_idxes,
actor_outputs):
critic_inputs[input_idx] = actor_output
critic_outputs = self.critic(critic_inputs)
if not isinstance(critic_outputs, (list,)):
critic_outputs = [critic_outputs]
assert len(critic_outputs) == self.nb_players
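        # Deterministic policy gradient: each head's loss is -mean(Q_i(s, mu_i(s))),
        # so gradient descent on the shared actor weights pushes its Q-value upward.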
actor_losses = [None] * self.nb_players
for input_idx, critic_output in zip(self.critic_action_input_idxes,
critic_outputs):
actor_losses[input_idx] = -K.mean(critic_output)
updates = actor_optimizer.get_updates(params=self.actor.
trainable_weights, loss=actor_losses)
if self.target_model_update < 1.0:
updates += get_soft_target_model_updates(self.target_actor,
self.actor, self.target_model_update)
updates += self.actor.updates
if K.backend() == 'tensorflow':
self.actor_train_fn = K.function(actor_inputs + [K.
learning_phase()], actor_outputs, updates=updates)
else:
if self.uses_learning_phase:
actor_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(actor_inputs, actor_outputs,
updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
def save_weights(self, filepath, overwrite=False):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.save_weights(actor_filepath, overwrite=overwrite)
self.critic.save_weights(critic_filepath, overwrite=overwrite)
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
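        # Each actor head proposes an action; the critic scores all proposals
        # and the highest-Q one is executed (noise is added during training).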
batch = self.process_state_batch([state])
actions = self.actor.predict_on_batch(batch)
if self.nb_players == 1:
actions = [actions]
assert len(actions) == self.nb_players
assert actions[0].shape == (1, self.nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state_batch_with_action = batch[:]
else:
state_batch_with_action = [batch]
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state_batch_with_action.insert(input_idx, actions[action_idx])
q_values = [qv.flatten() for qv in self.critic.predict_on_batch(
state_batch_with_action)]
assert q_values[0].shape == (1,)
assert len(q_values) == self.nb_players
action_best = actions[np.argmax(q_values)].flatten()
assert action_best.shape == (self.nb_actions,)
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action_best.shape
action_best += noise
return action_best
def forward(self, observation):
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action,
reward, terminal, training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
return metrics
can_train_either = (self.step > self.nb_steps_warmup_critic or self
.step > self.nb_steps_warmup_actor)
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0.0 if e.terminal1 else 1.0)
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
if self.step > self.nb_steps_warmup_critic:
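                # Critic update: one Bellman target per head,
                # y_i = r + gamma * (1 - done) * Q_i'(s', mu_i'(s')).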
target_actions = self.target_actor.predict_on_batch(
state1_batch)
if not isinstance(target_actions, (list,)):
target_actions = [target_actions]
assert len(target_actions) == self.nb_players
assert target_actions[0].shape == (self.batch_size, self.
nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
for action_idx, input_idx in enumerate(self.
critic_action_input_idxes):
state1_batch_with_action.insert(input_idx,
target_actions[action_idx])
target_q_values = self.target_critic.predict_on_batch(
state1_batch_with_action)
if not isinstance(target_q_values, (list,)):
target_q_values = [target_q_values]
target_q_values = [tqv.flatten() for tqv in target_q_values]
assert target_q_values[0].shape == reward_batch.shape
assert len(target_q_values) == self.nb_players
discounted_reward_batch = [(self.gamma * terminal1_batch *
tqv) for tqv in target_q_values]
assert discounted_reward_batch[0].shape == reward_batch.shape
targets = [(reward_batch + drb) for drb in
discounted_reward_batch]
assert targets[0].shape == reward_batch.shape
assert len(targets) == self.nb_players
if len(self.critic.inputs) > self.nb_players + 1:
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
for input_idx in self.critic_action_input_idxes:
state0_batch_with_action.insert(input_idx, action_batch)
metrics = self.critic.train_on_batch(state0_batch_with_action,
targets)
if self.processor is not None:
metrics += self.processor.metrics
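            # Actor update: one deterministic-policy-gradient step through the
            # precompiled train function; critic weights are held fixed here.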
if self.step > self.nb_steps_warmup_actor:
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)
assert len(action_values) == self.nb_players
assert action_values[0].shape == (self.batch_size, self.
nb_actions)
if (self.target_model_update >= 1 and self.step % self.
target_model_update == 0):
self.update_target_models_hard()
return metrics
<|reserved_special_token_1|>
from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.layers as layers
import keras.optimizers as optimizers
from rl.core import Agent
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
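# `mean_q` is logged as a diagnostic metric: the batch-mean of the largest
# predicted Q-value, a rough gauge of how the value estimates evolve.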
class UBDDPGAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, actor, critic, nb_players,
critic_action_inputs, memory, gamma=0.99, batch_size=32,
nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=
np.inf, random_process=None, custom_model_objects={},
target_model_update=0.001, **kwargs):
assert len(critic_action_inputs) == nb_players
if hasattr(actor.output, '__len__') and len(actor.output
) != nb_players:
            raise ValueError(
                'Actor "{}" does not have the right number of '
                'outputs. DDPG expects an actor that has {} outputs.'
                .format(actor, nb_players))
for critic_action_input in critic_action_inputs:
if critic_action_input not in critic.input:
raise ValueError(
'Critic "{}" does not have designated action input "{}".'
.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError(
'Critic "{}" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'
.format(critic))
super(UBDDPGAgent, self).__init__(**kwargs)
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
target_model_update = int(target_model_update)
else:
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn(
"`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we're falling back to `delta_range[1] = {}`"
.format(delta_range[1]))
delta_clip = delta_range[1]
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
self.actor = actor
self.critic = critic
self.nb_players = nb_players
self.critic_action_inputs = critic_action_inputs
self.critic_action_input_idxes = [self.critic.input.index(
critic_action_input) for critic_action_input in
critic_action_inputs]
self.memory = memory
self.compiled = False
self.reset_states()
@property
def uses_learning_phase(self):
return (self.actor.uses_learning_phase or self.critic.
uses_learning_phase)
def compile(self, optimizer, metrics=[]):
        metrics = metrics + [mean_q]  # avoid mutating the shared default list
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
                raise ValueError(
                    'When providing a list of optimizers it must contain exactly two entries: the first one for the actor and the second one for the critic.'
                    )
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(
metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
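        # Per-head critic loss: Huber loss clipped at `delta_clip`, so large
        # TD errors cannot produce exploding gradients.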
def clipped_error(y_true, y_pred):
y_true = K.squeeze(y_true, axis=-1)
y_pred = K.squeeze(y_pred, axis=-1)
loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
return loss
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects
)
self.target_critic.compile(optimizer='sgd', loss='mse')
self.actor.compile(optimizer='sgd', loss='mse')
if self.target_model_update < 1.0:
critic_updates = get_soft_target_model_updates(self.
target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,
critic_updates)
self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error
] * self.nb_players, metrics=critic_metrics)
critic_inputs = []
actor_inputs = []
for i in self.critic.input:
if i in self.critic_action_inputs:
critic_inputs.append([])
else:
critic_inputs.append(i)
actor_inputs.append(i)
actor_outputs = self.actor(actor_inputs)
if not isinstance(actor_outputs, (list,)):
actor_outputs = [actor_outputs]
assert len(actor_outputs) == self.nb_players
for input_idx, actor_output in zip(self.critic_action_input_idxes,
actor_outputs):
critic_inputs[input_idx] = actor_output
critic_outputs = self.critic(critic_inputs)
if not isinstance(critic_outputs, (list,)):
critic_outputs = [critic_outputs]
assert len(critic_outputs) == self.nb_players
actor_losses = [None] * self.nb_players
for input_idx, critic_output in zip(self.critic_action_input_idxes,
critic_outputs):
actor_losses[input_idx] = -K.mean(critic_output)
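        # Deterministic policy gradient: each actor head ascends its critic
        # head's Q-value, implemented as gradient descent on -mean(Q). Note
        # this indexing assumes the critic's action inputs come before its
        # observation input(s), so every index stays within nb_players.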
updates = actor_optimizer.get_updates(params=self.actor.
trainable_weights, loss=actor_losses)
if self.target_model_update < 1.0:
updates += get_soft_target_model_updates(self.target_actor,
self.actor, self.target_model_update)
updates += self.actor.updates
if K.backend() == 'tensorflow':
self.actor_train_fn = K.function(actor_inputs + [K.
learning_phase()], actor_outputs, updates=updates)
else:
if self.uses_learning_phase:
actor_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(actor_inputs, actor_outputs,
updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
def save_weights(self, filepath, overwrite=False):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.save_weights(actor_filepath, overwrite=overwrite)
self.critic.save_weights(critic_filepath, overwrite=overwrite)
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
batch = self.process_state_batch([state])
actions = self.actor.predict_on_batch(batch)
if self.nb_players == 1:
actions = [actions]
assert len(actions) == self.nb_players
assert actions[0].shape == (1, self.nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state_batch_with_action = batch[:]
else:
state_batch_with_action = [batch]
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state_batch_with_action.insert(input_idx, actions[action_idx])
q_values = [qv.flatten() for qv in self.critic.predict_on_batch(
state_batch_with_action)]
assert q_values[0].shape == (1,)
assert len(q_values) == self.nb_players
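        # Pick the best of the per-player proposals: the critic scores every
        # head's action for the current state and the arg-max proposal wins.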
action_best = actions[np.argmax(q_values)].flatten()
assert action_best.shape == (self.nb_actions,)
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action_best.shape
action_best += noise
return action_best
def forward(self, observation):
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action,
reward, terminal, training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
return metrics
can_train_either = (self.step > self.nb_steps_warmup_critic or self
.step > self.nb_steps_warmup_actor)
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0.0 if e.terminal1 else 1.0)
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
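            # Critic update: regress each Q head onto the one-step TD target
            # y = r + gamma * mask * Q_target(s1, mu_target(s1)), where the
            # mask (terminal1_batch) is 0.0 on terminal transitions.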
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(
state1_batch)
if not isinstance(target_actions, (list,)):
target_actions = [target_actions]
assert len(target_actions) == self.nb_players
assert target_actions[0].shape == (self.batch_size, self.
nb_actions)
if len(self.critic.inputs) > self.nb_players + 1:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
for action_idx, input_idx in enumerate(self.
critic_action_input_idxes):
state1_batch_with_action.insert(input_idx,
target_actions[action_idx])
target_q_values = self.target_critic.predict_on_batch(
state1_batch_with_action)
if not isinstance(target_q_values, (list,)):
target_q_values = [target_q_values]
target_q_values = [tqv.flatten() for tqv in target_q_values]
assert target_q_values[0].shape == reward_batch.shape
assert len(target_q_values) == self.nb_players
discounted_reward_batch = [(self.gamma * terminal1_batch *
tqv) for tqv in target_q_values]
assert discounted_reward_batch[0].shape == reward_batch.shape
targets = [(reward_batch + drb) for drb in
discounted_reward_batch]
assert targets[0].shape == reward_batch.shape
assert len(targets) == self.nb_players
if len(self.critic.inputs) > self.nb_players + 1:
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
for input_idx in self.critic_action_input_idxes:
state0_batch_with_action.insert(input_idx, action_batch)
metrics = self.critic.train_on_batch(state0_batch_with_action,
targets)
if self.processor is not None:
metrics += self.processor.metrics
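            # Actor update: one deterministic-policy-gradient step through the
            # precompiled train function; critic weights are held fixed here.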
if self.step > self.nb_steps_warmup_actor:
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)
assert len(action_values) == self.nb_players
assert action_values[0].shape == (self.batch_size, self.
nb_actions)
if (self.target_model_update >= 1 and self.step % self.
target_model_update == 0):
self.update_target_models_hard()
return metrics
<|reserved_special_token_1|>
from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.layers as layers
import keras.optimizers as optimizers
from rl.core import Agent
from rl.util import *
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
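# `mean_q` is logged as a diagnostic metric: the batch-mean of the largest
# predicted Q-value, a rough gauge of how the value estimates evolve.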
# Deep DPG as described by Lillicrap et al. (2015)
# http://arxiv.org/pdf/1509.02971v2.pdf
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.646.4324&rep=rep1&type=pdf
class UBDDPGAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, actor, critic, nb_players, critic_action_inputs, memory,
gamma=.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=np.inf,
random_process=None, custom_model_objects={}, target_model_update=.001, **kwargs):
assert len(critic_action_inputs) == nb_players
if hasattr(actor.output, '__len__') and len(actor.output) != nb_players:
            raise ValueError(
                'Actor "{}" does not have the right number of '
                'outputs. DDPG expects an actor that has {} outputs.'
                .format(actor, nb_players))
# if hasattr(critic.output, '__len__') and len(critic.output) > 1:
# raise ValueError('Critic "{}" has more than one output. DDPG expects a critic that has a single output.'.format(critic))
for critic_action_input in critic_action_inputs:
if critic_action_input not in critic.input:
raise ValueError('Critic "{}" does not have designated action input "{}".'.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError('Critic "{}" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'.format(critic))
super(UBDDPGAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
# Related objects.
self.actor = actor
self.critic = critic
self.nb_players = nb_players
self.critic_action_inputs = critic_action_inputs
self.critic_action_input_idxes = [
self.critic.input.index(critic_action_input)
for critic_action_input in critic_action_inputs
]
self.memory = memory
# State.
self.compiled = False
self.reset_states()
@property
def uses_learning_phase(self):
return self.actor.uses_learning_phase or self.critic.uses_learning_phase
def compile(self, optimizer, metrics=[]):
        metrics = metrics + [mean_q]  # avoid mutating the shared default list
if type(optimizer) in (list, tuple):
if len(optimizer) != 2:
                raise ValueError('When providing a list of optimizers it must contain exactly two entries: the first one for the actor and the second one for the critic.')
actor_optimizer, critic_optimizer = optimizer
else:
actor_optimizer = optimizer
critic_optimizer = clone_optimizer(optimizer)
if type(actor_optimizer) is str:
actor_optimizer = optimizers.get(actor_optimizer)
if type(critic_optimizer) is str:
critic_optimizer = optimizers.get(critic_optimizer)
assert actor_optimizer != critic_optimizer
if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(metrics[1], '__len__'):
actor_metrics, critic_metrics = metrics
else:
actor_metrics = critic_metrics = metrics
def clipped_error(y_true, y_pred):
y_true = K.squeeze(y_true, axis=-1)
y_pred = K.squeeze(y_pred, axis=-1)
loss = K.mean(
# K.random_uniform(shape=(self.batch_size, self.nb_players), minval=0., maxval=1.) *
huber_loss(y_true, y_pred, self.delta_clip),
axis=-1)
# y_true = K.print_tensor(y_true, message='y_true: ')
# y_pred = K.print_tensor(y_pred, message='y_pred: ')
# loss = K.print_tensor(loss, message='loss: ')
return loss
# Compile target networks. We only use them in feed-forward mode, hence we can pass any
# optimizer and loss since we never use it anyway.
self.target_actor = clone_model(self.actor, self.custom_model_objects)
self.target_actor.compile(optimizer='sgd', loss='mse')
self.target_critic = clone_model(self.critic, self.custom_model_objects)
self.target_critic.compile(optimizer='sgd', loss='mse')
        # We also compile the actor. We never optimize the actor using Keras but instead compute
        # the policy gradient ourselves. However, we need the actor in feed-forward mode, hence
        # we also compile it with any optimizer and loss; neither is ever used.
self.actor.compile(optimizer='sgd', loss='mse')
# Compile the critic.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
critic_updates = get_soft_target_model_updates(self.target_critic, self.critic, self.target_model_update)
critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer, critic_updates)
self.critic.compile(
optimizer=critic_optimizer,
loss=[clipped_error]*self.nb_players,
metrics=critic_metrics)
# Combine actor and critic so that we can get the policy gradient.
# Assuming critic's state inputs are the same as actor's.
critic_inputs = []
actor_inputs = []
for i in self.critic.input:
if i in self.critic_action_inputs:
critic_inputs.append([])
else:
critic_inputs.append(i)
actor_inputs.append(i)
actor_outputs = self.actor(actor_inputs)
if not isinstance(actor_outputs, (list,)):
actor_outputs = [actor_outputs]
assert len(actor_outputs) == self.nb_players
for input_idx, actor_output in zip(self.critic_action_input_idxes, actor_outputs):
critic_inputs[input_idx] = actor_output
# critic_outputs = layers.Maximum()(self.critic(critic_inputs))
critic_outputs = self.critic(critic_inputs)
if not isinstance(critic_outputs, (list,)):
critic_outputs = [critic_outputs]
assert len(critic_outputs) == self.nb_players
        actor_losses = [None] * self.nb_players
for input_idx, critic_output in zip(self.critic_action_input_idxes, critic_outputs):
actor_losses[input_idx] = -K.mean(critic_output)
updates = actor_optimizer.get_updates(
params=self.actor.trainable_weights,
loss=actor_losses)
if self.target_model_update < 1.:
# Include soft target model updates.
updates += get_soft_target_model_updates(self.target_actor, self.actor, self.target_model_update)
updates += self.actor.updates # include other updates of the actor, e.g. for BN
# Finally, combine it all into a callable function.
if K.backend() == 'tensorflow':
self.actor_train_fn = K.function(actor_inputs + [K.learning_phase()],
actor_outputs, updates=updates)
else:
if self.uses_learning_phase:
actor_inputs += [K.learning_phase()]
self.actor_train_fn = K.function(actor_inputs, actor_outputs, updates=updates)
self.actor_optimizer = actor_optimizer
self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
def save_weights(self, filepath, overwrite=False):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.save_weights(actor_filepath, overwrite=overwrite)
self.critic.save_weights(critic_filepath, overwrite=overwrite)
def update_target_models_hard(self):
self.target_critic.set_weights(self.critic.get_weights())
self.target_actor.set_weights(self.actor.get_weights())
# TODO: implement pickle
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.actor.reset_states()
self.critic.reset_states()
self.target_actor.reset_states()
self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def select_action(self, state):
batch = self.process_state_batch([state])
# actions = [action.flatten() for action in self.actor.predict_on_batch(batch)]
actions = self.actor.predict_on_batch(batch)
if self.nb_players == 1:
            actions = [actions]
# actions = [a.flatten() for a in actions]
assert len(actions) == self.nb_players
# assert actions[0].shape == (self.nb_actions,)
assert actions[0].shape == (1, self.nb_actions)
# print('actions: {}'.format(actions))
if len(self.critic.inputs) > (self.nb_players+1): # state is a list
state_batch_with_action = batch[:]
else:
state_batch_with_action = [batch]
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state_batch_with_action.insert(input_idx, actions[action_idx])
q_values = [
qv.flatten()
for qv in self.critic.predict_on_batch(state_batch_with_action)
]
assert q_values[0].shape == (1, )
assert len(q_values) == self.nb_players
# print('q_values: {}'.format(q_values))
action_best = actions[np.argmax(q_values)].flatten()
        assert action_best.shape == (self.nb_actions, )
# print('action_best: {}'.format(action_best))
# print(type(action_best[0]))
# Apply noise, if a random process is set.
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action_best.shape
action_best += noise
return action_best
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state) # TODO: move this into policy
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
@property
def layers(self):
return self.actor.layers[:] + self.critic.layers[:]
@property
def metrics_names(self):
names = self.critic.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def backward(self, reward, terminal=False):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
can_train_either = self.step > self.nb_steps_warmup_critic or self.step > self.nb_steps_warmup_actor
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Update critic, if warm up is over.
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(state1_batch)
if not isinstance(target_actions, (list,)):
target_actions = [target_actions]
assert len(target_actions) == self.nb_players
assert target_actions[0].shape == (self.batch_size, self.nb_actions)
if len(self.critic.inputs) > (self.nb_players+1): # state is a list
# if len(self.critic.inputs) >= 3:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
# state1_batch_with_action.insert(self.critic_action_input_idx, target_actions)
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state1_batch_with_action.insert(input_idx, target_actions[action_idx])
target_q_values = self.target_critic.predict_on_batch(state1_batch_with_action)
if not isinstance(target_q_values, (list,)):
target_q_values = [target_q_values]
target_q_values = [ tqv.flatten() for tqv in target_q_values]
assert target_q_values[0].shape == reward_batch.shape
assert len(target_q_values) == self.nb_players
# Compute r_t + gamma * Q(s_t+1, mu(s_t+1)) and update the target ys accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = [
self.gamma * terminal1_batch * tqv
for tqv in target_q_values
]
assert discounted_reward_batch[0].shape == reward_batch.shape
targets = [reward_batch + drb for drb in discounted_reward_batch] # .reshape(self.batch_size, 1)
assert targets[0].shape == reward_batch.shape
assert len(targets) == self.nb_players
# Perform a single batch update on the critic network.
# if len(self.critic.inputs) >= 3:
if len(self.critic.inputs) > (self.nb_players+1): # state is a list
state0_batch_with_action = state0_batch[:]
else:
state0_batch_with_action = [state0_batch]
for input_idx in self.critic_action_input_idxes:
state0_batch_with_action.insert(input_idx, action_batch)
# state0_batch_with_action.insert(self.critic_action_input_idx, action_batch)
metrics = self.critic.train_on_batch(
state0_batch_with_action,
targets)
if self.processor is not None:
metrics += self.processor.metrics
# q_values = self.critic.predict_on_batch(state0_batch_with_action)
# if not isinstance(q_values, (list,)):
# q_values = [q_values]
# q_values = [ qv.flatten() for qv in q_values]
# print('gamma: {}'.format(self.gamma))
# print('terminal1_batch: {}'.format(terminal1_batch))
# print('target_q_values: {}'.format(target_q_values))
# print('discounted_reward_batch: {}'.format(discounted_reward_batch))
# print('reward_batch: {}'.format(reward_batch))
# print('targets: {}'.format(targets))
# print('current q values: {}'.format(q_values))
# Update actor, if warm up is over.
if self.step > self.nb_steps_warmup_actor:
# TODO: implement metrics for actor
if len(self.actor.inputs) >= 2:
inputs = state0_batch[:]
else:
inputs = [state0_batch]
if self.uses_learning_phase:
inputs += [self.training]
action_values = self.actor_train_fn(inputs)
assert len(action_values) == self.nb_players
assert action_values[0].shape == (self.batch_size, self.nb_actions)
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_models_hard()
return metrics
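# ---------------------------------------------------------------------------
# Minimal wiring sketch (hypothetical, not part of the original module).
# It assumes the Keras-2/TF1-era stack this file targets and keras-rl's
# SequentialMemory; the model shapes and names below are illustrative only.
# The critic's per-player action inputs are placed *before* its observation
# input so that `critic_action_input_idxes` lines up with the per-player
# output/loss lists built in `compile()`.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from keras.models import Model
    from rl.memory import SequentialMemory

    nb_actions, nb_players, obs_dim = 3, 2, 5

    # Actor: a shared trunk with one tanh action head per player.
    obs_in = layers.Input(shape=(1, obs_dim), name='observation')
    trunk = layers.Dense(16, activation='relu')(layers.Flatten()(obs_in))
    actor = Model(obs_in, [layers.Dense(nb_actions, activation='tanh')(trunk)
                           for _ in range(nb_players)])

    # Critic: action inputs first, then the observation; one Q head per player.
    action_ins = [layers.Input(shape=(nb_actions,), name='action_%d' % p)
                  for p in range(nb_players)]
    c_obs = layers.Input(shape=(1, obs_dim), name='critic_observation')
    h = layers.Concatenate()(action_ins + [layers.Flatten()(c_obs)])
    h = layers.Dense(32, activation='relu')(h)
    critic = Model(action_ins + [c_obs],
                   [layers.Dense(1)(h) for _ in range(nb_players)])

    agent = UBDDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                        nb_players=nb_players, critic_action_inputs=action_ins,
                        memory=SequentialMemory(limit=10000, window_length=1))
    agent.compile('adam')
    # agent.fit(env, nb_steps=50000)  # train against any gym-style env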
|
flexible
|
{
"blob_id": "a2fe62b6bbb6b753ef6aec6f44758b8aceeeafe6",
"index": 9691,
"step-1": "<mask token>\n\n\nclass UBDDPGAgent(Agent):\n <mask token>\n <mask token>\n <mask token>\n\n def compile(self, optimizer, metrics=[]):\n metrics += [mean_q]\n if type(optimizer) in (list, tuple):\n if len(optimizer) != 2:\n raise ValueError(\n 'More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'\n )\n actor_optimizer, critic_optimizer = optimizer\n else:\n actor_optimizer = optimizer\n critic_optimizer = clone_optimizer(optimizer)\n if type(actor_optimizer) is str:\n actor_optimizer = optimizers.get(actor_optimizer)\n if type(critic_optimizer) is str:\n critic_optimizer = optimizers.get(critic_optimizer)\n assert actor_optimizer != critic_optimizer\n if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(\n metrics[1], '__len__'):\n actor_metrics, critic_metrics = metrics\n else:\n actor_metrics = critic_metrics = metrics\n\n def clipped_error(y_true, y_pred):\n y_true = K.squeeze(y_true, axis=-1)\n y_pred = K.squeeze(y_pred, axis=-1)\n loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)\n return loss\n self.target_actor = clone_model(self.actor, self.custom_model_objects)\n self.target_actor.compile(optimizer='sgd', loss='mse')\n self.target_critic = clone_model(self.critic, self.custom_model_objects\n )\n self.target_critic.compile(optimizer='sgd', loss='mse')\n self.actor.compile(optimizer='sgd', loss='mse')\n if self.target_model_update < 1.0:\n critic_updates = get_soft_target_model_updates(self.\n target_critic, self.critic, self.target_model_update)\n critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,\n critic_updates)\n self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error\n ] * self.nb_players, metrics=critic_metrics)\n critic_inputs = []\n actor_inputs = []\n for i in self.critic.input:\n if i in self.critic_action_inputs:\n critic_inputs.append([])\n else:\n critic_inputs.append(i)\n actor_inputs.append(i)\n actor_outputs = self.actor(actor_inputs)\n if not isinstance(actor_outputs, (list,)):\n actor_outputs = [actor_outputs]\n assert len(actor_outputs) == self.nb_players\n for input_idx, actor_output in zip(self.critic_action_input_idxes,\n actor_outputs):\n critic_inputs[input_idx] = actor_output\n critic_outputs = self.critic(critic_inputs)\n if not isinstance(critic_outputs, (list,)):\n critic_outputs = [critic_outputs]\n assert len(critic_outputs) == self.nb_players\n actor_losses = [None] * self.nb_players\n for input_idx, critic_output in zip(self.critic_action_input_idxes,\n critic_outputs):\n actor_losses[input_idx] = -K.mean(critic_output)\n updates = actor_optimizer.get_updates(params=self.actor.\n trainable_weights, loss=actor_losses)\n if self.target_model_update < 1.0:\n updates += get_soft_target_model_updates(self.target_actor,\n self.actor, self.target_model_update)\n updates += self.actor.updates\n if K.backend() == 'tensorflow':\n self.actor_train_fn = K.function(actor_inputs + [K.\n learning_phase()], actor_outputs, updates=updates)\n else:\n if self.uses_learning_phase:\n actor_inputs += [K.learning_phase()]\n self.actor_train_fn = K.function(actor_inputs, actor_outputs,\n updates=updates)\n self.actor_optimizer = actor_optimizer\n self.compiled = True\n <mask token>\n <mask token>\n\n def update_target_models_hard(self):\n self.target_critic.set_weights(self.critic.get_weights())\n self.target_actor.set_weights(self.actor.get_weights())\n\n def reset_states(self):\n if self.random_process is not 
None:\n self.random_process.reset_states()\n self.recent_action = None\n self.recent_observation = None\n if self.compiled:\n self.actor.reset_states()\n self.critic.reset_states()\n self.target_actor.reset_states()\n self.target_critic.reset_states()\n\n def process_state_batch(self, batch):\n batch = np.array(batch)\n if self.processor is None:\n return batch\n return self.processor.process_state_batch(batch)\n\n def select_action(self, state):\n batch = self.process_state_batch([state])\n actions = self.actor.predict_on_batch(batch)\n if self.nb_players == 1:\n actions = [actions]\n assert len(actions) == self.nb_players\n assert actions[0].shape == (1, self.nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state_batch_with_action = batch[:]\n else:\n state_batch_with_action = [batch]\n for action_idx, input_idx in enumerate(self.critic_action_input_idxes):\n state_batch_with_action.insert(input_idx, actions[action_idx])\n q_values = [qv.flatten() for qv in self.critic.predict_on_batch(\n state_batch_with_action)]\n assert q_values[0].shape == (1,)\n assert len(q_values) == self.nb_players\n action_best = actions[np.argmax(q_values)].flatten()\n assert action_best.shape == (self.nb_actions,)\n if self.training and self.random_process is not None:\n noise = self.random_process.sample()\n assert noise.shape == action_best.shape\n action_best += noise\n return action_best\n <mask token>\n\n @property\n def layers(self):\n return self.actor.layers[:] + self.critic.layers[:]\n\n @property\n def metrics_names(self):\n names = self.critic.metrics_names[:]\n if self.processor is not None:\n names += self.processor.metrics_names[:]\n return names\n\n def backward(self, reward, terminal=False):\n if self.step % self.memory_interval == 0:\n self.memory.append(self.recent_observation, self.recent_action,\n reward, terminal, training=self.training)\n metrics = [np.nan for _ in self.metrics_names]\n if not self.training:\n return metrics\n can_train_either = (self.step > self.nb_steps_warmup_critic or self\n .step > self.nb_steps_warmup_actor)\n if can_train_either and self.step % self.train_interval == 0:\n experiences = self.memory.sample(self.batch_size)\n assert len(experiences) == self.batch_size\n state0_batch = []\n reward_batch = []\n action_batch = []\n terminal1_batch = []\n state1_batch = []\n for e in experiences:\n state0_batch.append(e.state0)\n state1_batch.append(e.state1)\n reward_batch.append(e.reward)\n action_batch.append(e.action)\n terminal1_batch.append(0.0 if e.terminal1 else 1.0)\n state0_batch = self.process_state_batch(state0_batch)\n state1_batch = self.process_state_batch(state1_batch)\n terminal1_batch = np.array(terminal1_batch)\n reward_batch = np.array(reward_batch)\n action_batch = np.array(action_batch)\n assert reward_batch.shape == (self.batch_size,)\n assert terminal1_batch.shape == reward_batch.shape\n assert action_batch.shape == (self.batch_size, self.nb_actions)\n if self.step > self.nb_steps_warmup_critic:\n target_actions = self.target_actor.predict_on_batch(\n state1_batch)\n if not isinstance(target_actions, (list,)):\n target_actions = [target_actions]\n assert len(target_actions) == self.nb_players\n assert target_actions[0].shape == (self.batch_size, self.\n nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state1_batch_with_action = state1_batch[:]\n else:\n state1_batch_with_action = [state1_batch]\n for action_idx, input_idx in enumerate(self.\n critic_action_input_idxes):\n 
state1_batch_with_action.insert(input_idx,\n target_actions[action_idx])\n target_q_values = self.target_critic.predict_on_batch(\n state1_batch_with_action)\n if not isinstance(target_q_values, (list,)):\n target_q_values = [target_q_values]\n target_q_values = [tqv.flatten() for tqv in target_q_values]\n assert target_q_values[0].shape == reward_batch.shape\n assert len(target_q_values) == self.nb_players\n discounted_reward_batch = [(self.gamma * terminal1_batch *\n tqv) for tqv in target_q_values]\n assert discounted_reward_batch[0].shape == reward_batch.shape\n targets = [(reward_batch + drb) for drb in\n discounted_reward_batch]\n assert targets[0].shape == reward_batch.shape\n assert len(targets) == self.nb_players\n if len(self.critic.inputs) > self.nb_players + 1:\n state0_batch_with_action = state0_batch[:]\n else:\n state0_batch_with_action = [state0_batch]\n for input_idx in self.critic_action_input_idxes:\n state0_batch_with_action.insert(input_idx, action_batch)\n metrics = self.critic.train_on_batch(state0_batch_with_action,\n targets)\n if self.processor is not None:\n metrics += self.processor.metrics\n if self.step > self.nb_steps_warmup_actor:\n if len(self.actor.inputs) >= 2:\n inputs = state0_batch[:]\n else:\n inputs = [state0_batch]\n if self.uses_learning_phase:\n inputs += [self.training]\n action_values = self.actor_train_fn(inputs)\n assert len(action_values) == self.nb_players\n assert action_values[0].shape == (self.batch_size, self.\n nb_actions)\n if (self.target_model_update >= 1 and self.step % self.\n target_model_update == 0):\n self.update_target_models_hard()\n return metrics\n",
"step-2": "<mask token>\n\n\nclass UBDDPGAgent(Agent):\n <mask token>\n\n def __init__(self, nb_actions, actor, critic, nb_players,\n critic_action_inputs, memory, gamma=0.99, batch_size=32,\n nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,\n train_interval=1, memory_interval=1, delta_range=None, delta_clip=\n np.inf, random_process=None, custom_model_objects={},\n target_model_update=0.001, **kwargs):\n assert len(critic_action_inputs) == nb_players\n if hasattr(actor.output, '__len__') and len(actor.output\n ) != nb_players:\n raise ValueError((\n 'Actor \"{}\" does not have the right number of ',\n 'outputs. DDPG expects an actor that has {} outputs.').\n format(actor, nb_players))\n for critic_action_input in critic_action_inputs:\n if critic_action_input not in critic.input:\n raise ValueError(\n 'Critic \"{}\" does not have designated action input \"{}\".'\n .format(critic, critic_action_input))\n if not hasattr(critic.input, '__len__') or len(critic.input) < 2:\n raise ValueError(\n 'Critic \"{}\" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'\n .format(critic))\n super(UBDDPGAgent, self).__init__(**kwargs)\n if target_model_update < 0:\n raise ValueError('`target_model_update` must be >= 0.')\n elif target_model_update >= 1:\n target_model_update = int(target_model_update)\n else:\n target_model_update = float(target_model_update)\n if delta_range is not None:\n warnings.warn(\n \"`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we're falling back to `delta_range[1] = {}`\"\n .format(delta_range[1]))\n delta_clip = delta_range[1]\n self.nb_actions = nb_actions\n self.nb_steps_warmup_actor = nb_steps_warmup_actor\n self.nb_steps_warmup_critic = nb_steps_warmup_critic\n self.random_process = random_process\n self.delta_clip = delta_clip\n self.gamma = gamma\n self.target_model_update = target_model_update\n self.batch_size = batch_size\n self.train_interval = train_interval\n self.memory_interval = memory_interval\n self.custom_model_objects = custom_model_objects\n self.actor = actor\n self.critic = critic\n self.nb_players = nb_players\n self.critic_action_inputs = critic_action_inputs\n self.critic_action_input_idxes = [self.critic.input.index(\n critic_action_input) for critic_action_input in\n critic_action_inputs]\n self.memory = memory\n self.compiled = False\n self.reset_states()\n <mask token>\n\n def compile(self, optimizer, metrics=[]):\n metrics += [mean_q]\n if type(optimizer) in (list, tuple):\n if len(optimizer) != 2:\n raise ValueError(\n 'More than two optimizers provided. 
Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'\n )\n actor_optimizer, critic_optimizer = optimizer\n else:\n actor_optimizer = optimizer\n critic_optimizer = clone_optimizer(optimizer)\n if type(actor_optimizer) is str:\n actor_optimizer = optimizers.get(actor_optimizer)\n if type(critic_optimizer) is str:\n critic_optimizer = optimizers.get(critic_optimizer)\n assert actor_optimizer != critic_optimizer\n if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(\n metrics[1], '__len__'):\n actor_metrics, critic_metrics = metrics\n else:\n actor_metrics = critic_metrics = metrics\n\n def clipped_error(y_true, y_pred):\n y_true = K.squeeze(y_true, axis=-1)\n y_pred = K.squeeze(y_pred, axis=-1)\n loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)\n return loss\n self.target_actor = clone_model(self.actor, self.custom_model_objects)\n self.target_actor.compile(optimizer='sgd', loss='mse')\n self.target_critic = clone_model(self.critic, self.custom_model_objects\n )\n self.target_critic.compile(optimizer='sgd', loss='mse')\n self.actor.compile(optimizer='sgd', loss='mse')\n if self.target_model_update < 1.0:\n critic_updates = get_soft_target_model_updates(self.\n target_critic, self.critic, self.target_model_update)\n critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,\n critic_updates)\n self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error\n ] * self.nb_players, metrics=critic_metrics)\n critic_inputs = []\n actor_inputs = []\n for i in self.critic.input:\n if i in self.critic_action_inputs:\n critic_inputs.append([])\n else:\n critic_inputs.append(i)\n actor_inputs.append(i)\n actor_outputs = self.actor(actor_inputs)\n if not isinstance(actor_outputs, (list,)):\n actor_outputs = [actor_outputs]\n assert len(actor_outputs) == self.nb_players\n for input_idx, actor_output in zip(self.critic_action_input_idxes,\n actor_outputs):\n critic_inputs[input_idx] = actor_output\n critic_outputs = self.critic(critic_inputs)\n if not isinstance(critic_outputs, (list,)):\n critic_outputs = [critic_outputs]\n assert len(critic_outputs) == self.nb_players\n actor_losses = [None] * self.nb_players\n for input_idx, critic_output in zip(self.critic_action_input_idxes,\n critic_outputs):\n actor_losses[input_idx] = -K.mean(critic_output)\n updates = actor_optimizer.get_updates(params=self.actor.\n trainable_weights, loss=actor_losses)\n if self.target_model_update < 1.0:\n updates += get_soft_target_model_updates(self.target_actor,\n self.actor, self.target_model_update)\n updates += self.actor.updates\n if K.backend() == 'tensorflow':\n self.actor_train_fn = K.function(actor_inputs + [K.\n learning_phase()], actor_outputs, updates=updates)\n else:\n if self.uses_learning_phase:\n actor_inputs += [K.learning_phase()]\n self.actor_train_fn = K.function(actor_inputs, actor_outputs,\n updates=updates)\n self.actor_optimizer = actor_optimizer\n self.compiled = True\n\n def load_weights(self, filepath):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n self.actor.load_weights(actor_filepath)\n self.critic.load_weights(critic_filepath)\n self.update_target_models_hard()\n <mask token>\n\n def update_target_models_hard(self):\n self.target_critic.set_weights(self.critic.get_weights())\n self.target_actor.set_weights(self.actor.get_weights())\n\n def reset_states(self):\n if self.random_process 
is not None:\n self.random_process.reset_states()\n self.recent_action = None\n self.recent_observation = None\n if self.compiled:\n self.actor.reset_states()\n self.critic.reset_states()\n self.target_actor.reset_states()\n self.target_critic.reset_states()\n\n def process_state_batch(self, batch):\n batch = np.array(batch)\n if self.processor is None:\n return batch\n return self.processor.process_state_batch(batch)\n\n def select_action(self, state):\n batch = self.process_state_batch([state])\n actions = self.actor.predict_on_batch(batch)\n if self.nb_players == 1:\n actions = [actions]\n assert len(actions) == self.nb_players\n assert actions[0].shape == (1, self.nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state_batch_with_action = batch[:]\n else:\n state_batch_with_action = [batch]\n for action_idx, input_idx in enumerate(self.critic_action_input_idxes):\n state_batch_with_action.insert(input_idx, actions[action_idx])\n q_values = [qv.flatten() for qv in self.critic.predict_on_batch(\n state_batch_with_action)]\n assert q_values[0].shape == (1,)\n assert len(q_values) == self.nb_players\n action_best = actions[np.argmax(q_values)].flatten()\n assert action_best.shape == (self.nb_actions,)\n if self.training and self.random_process is not None:\n noise = self.random_process.sample()\n assert noise.shape == action_best.shape\n action_best += noise\n return action_best\n\n def forward(self, observation):\n state = self.memory.get_recent_state(observation)\n action = self.select_action(state)\n self.recent_observation = observation\n self.recent_action = action\n return action\n\n @property\n def layers(self):\n return self.actor.layers[:] + self.critic.layers[:]\n\n @property\n def metrics_names(self):\n names = self.critic.metrics_names[:]\n if self.processor is not None:\n names += self.processor.metrics_names[:]\n return names\n\n def backward(self, reward, terminal=False):\n if self.step % self.memory_interval == 0:\n self.memory.append(self.recent_observation, self.recent_action,\n reward, terminal, training=self.training)\n metrics = [np.nan for _ in self.metrics_names]\n if not self.training:\n return metrics\n can_train_either = (self.step > self.nb_steps_warmup_critic or self\n .step > self.nb_steps_warmup_actor)\n if can_train_either and self.step % self.train_interval == 0:\n experiences = self.memory.sample(self.batch_size)\n assert len(experiences) == self.batch_size\n state0_batch = []\n reward_batch = []\n action_batch = []\n terminal1_batch = []\n state1_batch = []\n for e in experiences:\n state0_batch.append(e.state0)\n state1_batch.append(e.state1)\n reward_batch.append(e.reward)\n action_batch.append(e.action)\n terminal1_batch.append(0.0 if e.terminal1 else 1.0)\n state0_batch = self.process_state_batch(state0_batch)\n state1_batch = self.process_state_batch(state1_batch)\n terminal1_batch = np.array(terminal1_batch)\n reward_batch = np.array(reward_batch)\n action_batch = np.array(action_batch)\n assert reward_batch.shape == (self.batch_size,)\n assert terminal1_batch.shape == reward_batch.shape\n assert action_batch.shape == (self.batch_size, self.nb_actions)\n if self.step > self.nb_steps_warmup_critic:\n target_actions = self.target_actor.predict_on_batch(\n state1_batch)\n if not isinstance(target_actions, (list,)):\n target_actions = [target_actions]\n assert len(target_actions) == self.nb_players\n assert target_actions[0].shape == (self.batch_size, self.\n nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n 
state1_batch_with_action = state1_batch[:]\n else:\n state1_batch_with_action = [state1_batch]\n for action_idx, input_idx in enumerate(self.\n critic_action_input_idxes):\n state1_batch_with_action.insert(input_idx,\n target_actions[action_idx])\n target_q_values = self.target_critic.predict_on_batch(\n state1_batch_with_action)\n if not isinstance(target_q_values, (list,)):\n target_q_values = [target_q_values]\n target_q_values = [tqv.flatten() for tqv in target_q_values]\n assert target_q_values[0].shape == reward_batch.shape\n assert len(target_q_values) == self.nb_players\n discounted_reward_batch = [(self.gamma * terminal1_batch *\n tqv) for tqv in target_q_values]\n assert discounted_reward_batch[0].shape == reward_batch.shape\n targets = [(reward_batch + drb) for drb in\n discounted_reward_batch]\n assert targets[0].shape == reward_batch.shape\n assert len(targets) == self.nb_players\n if len(self.critic.inputs) > self.nb_players + 1:\n state0_batch_with_action = state0_batch[:]\n else:\n state0_batch_with_action = [state0_batch]\n for input_idx in self.critic_action_input_idxes:\n state0_batch_with_action.insert(input_idx, action_batch)\n metrics = self.critic.train_on_batch(state0_batch_with_action,\n targets)\n if self.processor is not None:\n metrics += self.processor.metrics\n if self.step > self.nb_steps_warmup_actor:\n if len(self.actor.inputs) >= 2:\n inputs = state0_batch[:]\n else:\n inputs = [state0_batch]\n if self.uses_learning_phase:\n inputs += [self.training]\n action_values = self.actor_train_fn(inputs)\n assert len(action_values) == self.nb_players\n assert action_values[0].shape == (self.batch_size, self.\n nb_actions)\n if (self.target_model_update >= 1 and self.step % self.\n target_model_update == 0):\n self.update_target_models_hard()\n return metrics\n",
"step-3": "<mask token>\n\n\nclass UBDDPGAgent(Agent):\n \"\"\"Write me\n \"\"\"\n\n def __init__(self, nb_actions, actor, critic, nb_players,\n critic_action_inputs, memory, gamma=0.99, batch_size=32,\n nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,\n train_interval=1, memory_interval=1, delta_range=None, delta_clip=\n np.inf, random_process=None, custom_model_objects={},\n target_model_update=0.001, **kwargs):\n assert len(critic_action_inputs) == nb_players\n if hasattr(actor.output, '__len__') and len(actor.output\n ) != nb_players:\n raise ValueError((\n 'Actor \"{}\" does not have the right number of ',\n 'outputs. DDPG expects an actor that has {} outputs.').\n format(actor, nb_players))\n for critic_action_input in critic_action_inputs:\n if critic_action_input not in critic.input:\n raise ValueError(\n 'Critic \"{}\" does not have designated action input \"{}\".'\n .format(critic, critic_action_input))\n if not hasattr(critic.input, '__len__') or len(critic.input) < 2:\n raise ValueError(\n 'Critic \"{}\" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'\n .format(critic))\n super(UBDDPGAgent, self).__init__(**kwargs)\n if target_model_update < 0:\n raise ValueError('`target_model_update` must be >= 0.')\n elif target_model_update >= 1:\n target_model_update = int(target_model_update)\n else:\n target_model_update = float(target_model_update)\n if delta_range is not None:\n warnings.warn(\n \"`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we're falling back to `delta_range[1] = {}`\"\n .format(delta_range[1]))\n delta_clip = delta_range[1]\n self.nb_actions = nb_actions\n self.nb_steps_warmup_actor = nb_steps_warmup_actor\n self.nb_steps_warmup_critic = nb_steps_warmup_critic\n self.random_process = random_process\n self.delta_clip = delta_clip\n self.gamma = gamma\n self.target_model_update = target_model_update\n self.batch_size = batch_size\n self.train_interval = train_interval\n self.memory_interval = memory_interval\n self.custom_model_objects = custom_model_objects\n self.actor = actor\n self.critic = critic\n self.nb_players = nb_players\n self.critic_action_inputs = critic_action_inputs\n self.critic_action_input_idxes = [self.critic.input.index(\n critic_action_input) for critic_action_input in\n critic_action_inputs]\n self.memory = memory\n self.compiled = False\n self.reset_states()\n\n @property\n def uses_learning_phase(self):\n return (self.actor.uses_learning_phase or self.critic.\n uses_learning_phase)\n\n def compile(self, optimizer, metrics=[]):\n metrics += [mean_q]\n if type(optimizer) in (list, tuple):\n if len(optimizer) != 2:\n raise ValueError(\n 'More than two optimizers provided. 
Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'\n )\n actor_optimizer, critic_optimizer = optimizer\n else:\n actor_optimizer = optimizer\n critic_optimizer = clone_optimizer(optimizer)\n if type(actor_optimizer) is str:\n actor_optimizer = optimizers.get(actor_optimizer)\n if type(critic_optimizer) is str:\n critic_optimizer = optimizers.get(critic_optimizer)\n assert actor_optimizer != critic_optimizer\n if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(\n metrics[1], '__len__'):\n actor_metrics, critic_metrics = metrics\n else:\n actor_metrics = critic_metrics = metrics\n\n def clipped_error(y_true, y_pred):\n y_true = K.squeeze(y_true, axis=-1)\n y_pred = K.squeeze(y_pred, axis=-1)\n loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)\n return loss\n self.target_actor = clone_model(self.actor, self.custom_model_objects)\n self.target_actor.compile(optimizer='sgd', loss='mse')\n self.target_critic = clone_model(self.critic, self.custom_model_objects\n )\n self.target_critic.compile(optimizer='sgd', loss='mse')\n self.actor.compile(optimizer='sgd', loss='mse')\n if self.target_model_update < 1.0:\n critic_updates = get_soft_target_model_updates(self.\n target_critic, self.critic, self.target_model_update)\n critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,\n critic_updates)\n self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error\n ] * self.nb_players, metrics=critic_metrics)\n critic_inputs = []\n actor_inputs = []\n for i in self.critic.input:\n if i in self.critic_action_inputs:\n critic_inputs.append([])\n else:\n critic_inputs.append(i)\n actor_inputs.append(i)\n actor_outputs = self.actor(actor_inputs)\n if not isinstance(actor_outputs, (list,)):\n actor_outputs = [actor_outputs]\n assert len(actor_outputs) == self.nb_players\n for input_idx, actor_output in zip(self.critic_action_input_idxes,\n actor_outputs):\n critic_inputs[input_idx] = actor_output\n critic_outputs = self.critic(critic_inputs)\n if not isinstance(critic_outputs, (list,)):\n critic_outputs = [critic_outputs]\n assert len(critic_outputs) == self.nb_players\n actor_losses = [None] * self.nb_players\n for input_idx, critic_output in zip(self.critic_action_input_idxes,\n critic_outputs):\n actor_losses[input_idx] = -K.mean(critic_output)\n updates = actor_optimizer.get_updates(params=self.actor.\n trainable_weights, loss=actor_losses)\n if self.target_model_update < 1.0:\n updates += get_soft_target_model_updates(self.target_actor,\n self.actor, self.target_model_update)\n updates += self.actor.updates\n if K.backend() == 'tensorflow':\n self.actor_train_fn = K.function(actor_inputs + [K.\n learning_phase()], actor_outputs, updates=updates)\n else:\n if self.uses_learning_phase:\n actor_inputs += [K.learning_phase()]\n self.actor_train_fn = K.function(actor_inputs, actor_outputs,\n updates=updates)\n self.actor_optimizer = actor_optimizer\n self.compiled = True\n\n def load_weights(self, filepath):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n self.actor.load_weights(actor_filepath)\n self.critic.load_weights(critic_filepath)\n self.update_target_models_hard()\n\n def save_weights(self, filepath, overwrite=False):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n 
self.actor.save_weights(actor_filepath, overwrite=overwrite)\n self.critic.save_weights(critic_filepath, overwrite=overwrite)\n\n def update_target_models_hard(self):\n self.target_critic.set_weights(self.critic.get_weights())\n self.target_actor.set_weights(self.actor.get_weights())\n\n def reset_states(self):\n if self.random_process is not None:\n self.random_process.reset_states()\n self.recent_action = None\n self.recent_observation = None\n if self.compiled:\n self.actor.reset_states()\n self.critic.reset_states()\n self.target_actor.reset_states()\n self.target_critic.reset_states()\n\n def process_state_batch(self, batch):\n batch = np.array(batch)\n if self.processor is None:\n return batch\n return self.processor.process_state_batch(batch)\n\n def select_action(self, state):\n batch = self.process_state_batch([state])\n actions = self.actor.predict_on_batch(batch)\n if self.nb_players == 1:\n actions = [actions]\n assert len(actions) == self.nb_players\n assert actions[0].shape == (1, self.nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state_batch_with_action = batch[:]\n else:\n state_batch_with_action = [batch]\n for action_idx, input_idx in enumerate(self.critic_action_input_idxes):\n state_batch_with_action.insert(input_idx, actions[action_idx])\n q_values = [qv.flatten() for qv in self.critic.predict_on_batch(\n state_batch_with_action)]\n assert q_values[0].shape == (1,)\n assert len(q_values) == self.nb_players\n action_best = actions[np.argmax(q_values)].flatten()\n assert action_best.shape == (self.nb_actions,)\n if self.training and self.random_process is not None:\n noise = self.random_process.sample()\n assert noise.shape == action_best.shape\n action_best += noise\n return action_best\n\n def forward(self, observation):\n state = self.memory.get_recent_state(observation)\n action = self.select_action(state)\n self.recent_observation = observation\n self.recent_action = action\n return action\n\n @property\n def layers(self):\n return self.actor.layers[:] + self.critic.layers[:]\n\n @property\n def metrics_names(self):\n names = self.critic.metrics_names[:]\n if self.processor is not None:\n names += self.processor.metrics_names[:]\n return names\n\n def backward(self, reward, terminal=False):\n if self.step % self.memory_interval == 0:\n self.memory.append(self.recent_observation, self.recent_action,\n reward, terminal, training=self.training)\n metrics = [np.nan for _ in self.metrics_names]\n if not self.training:\n return metrics\n can_train_either = (self.step > self.nb_steps_warmup_critic or self\n .step > self.nb_steps_warmup_actor)\n if can_train_either and self.step % self.train_interval == 0:\n experiences = self.memory.sample(self.batch_size)\n assert len(experiences) == self.batch_size\n state0_batch = []\n reward_batch = []\n action_batch = []\n terminal1_batch = []\n state1_batch = []\n for e in experiences:\n state0_batch.append(e.state0)\n state1_batch.append(e.state1)\n reward_batch.append(e.reward)\n action_batch.append(e.action)\n terminal1_batch.append(0.0 if e.terminal1 else 1.0)\n state0_batch = self.process_state_batch(state0_batch)\n state1_batch = self.process_state_batch(state1_batch)\n terminal1_batch = np.array(terminal1_batch)\n reward_batch = np.array(reward_batch)\n action_batch = np.array(action_batch)\n assert reward_batch.shape == (self.batch_size,)\n assert terminal1_batch.shape == reward_batch.shape\n assert action_batch.shape == (self.batch_size, self.nb_actions)\n if self.step > self.nb_steps_warmup_critic:\n 
target_actions = self.target_actor.predict_on_batch(\n state1_batch)\n if not isinstance(target_actions, (list,)):\n target_actions = [target_actions]\n assert len(target_actions) == self.nb_players\n assert target_actions[0].shape == (self.batch_size, self.\n nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state1_batch_with_action = state1_batch[:]\n else:\n state1_batch_with_action = [state1_batch]\n for action_idx, input_idx in enumerate(self.\n critic_action_input_idxes):\n state1_batch_with_action.insert(input_idx,\n target_actions[action_idx])\n target_q_values = self.target_critic.predict_on_batch(\n state1_batch_with_action)\n if not isinstance(target_q_values, (list,)):\n target_q_values = [target_q_values]\n target_q_values = [tqv.flatten() for tqv in target_q_values]\n assert target_q_values[0].shape == reward_batch.shape\n assert len(target_q_values) == self.nb_players\n discounted_reward_batch = [(self.gamma * terminal1_batch *\n tqv) for tqv in target_q_values]\n assert discounted_reward_batch[0].shape == reward_batch.shape\n targets = [(reward_batch + drb) for drb in\n discounted_reward_batch]\n assert targets[0].shape == reward_batch.shape\n assert len(targets) == self.nb_players\n if len(self.critic.inputs) > self.nb_players + 1:\n state0_batch_with_action = state0_batch[:]\n else:\n state0_batch_with_action = [state0_batch]\n for input_idx in self.critic_action_input_idxes:\n state0_batch_with_action.insert(input_idx, action_batch)\n metrics = self.critic.train_on_batch(state0_batch_with_action,\n targets)\n if self.processor is not None:\n metrics += self.processor.metrics\n if self.step > self.nb_steps_warmup_actor:\n if len(self.actor.inputs) >= 2:\n inputs = state0_batch[:]\n else:\n inputs = [state0_batch]\n if self.uses_learning_phase:\n inputs += [self.training]\n action_values = self.actor_train_fn(inputs)\n assert len(action_values) == self.nb_players\n assert action_values[0].shape == (self.batch_size, self.\n nb_actions)\n if (self.target_model_update >= 1 and self.step % self.\n target_model_update == 0):\n self.update_target_models_hard()\n return metrics\n",
"step-4": "from __future__ import division\nfrom collections import deque\nimport os\nimport warnings\nimport numpy as np\nimport keras.backend as K\nimport keras.layers as layers\nimport keras.optimizers as optimizers\nfrom rl.core import Agent\nfrom rl.util import *\n\n\ndef mean_q(y_true, y_pred):\n return K.mean(K.max(y_pred, axis=-1))\n\n\nclass UBDDPGAgent(Agent):\n \"\"\"Write me\n \"\"\"\n\n def __init__(self, nb_actions, actor, critic, nb_players,\n critic_action_inputs, memory, gamma=0.99, batch_size=32,\n nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,\n train_interval=1, memory_interval=1, delta_range=None, delta_clip=\n np.inf, random_process=None, custom_model_objects={},\n target_model_update=0.001, **kwargs):\n assert len(critic_action_inputs) == nb_players\n if hasattr(actor.output, '__len__') and len(actor.output\n ) != nb_players:\n raise ValueError((\n 'Actor \"{}\" does not have the right number of ',\n 'outputs. DDPG expects an actor that has {} outputs.').\n format(actor, nb_players))\n for critic_action_input in critic_action_inputs:\n if critic_action_input not in critic.input:\n raise ValueError(\n 'Critic \"{}\" does not have designated action input \"{}\".'\n .format(critic, critic_action_input))\n if not hasattr(critic.input, '__len__') or len(critic.input) < 2:\n raise ValueError(\n 'Critic \"{}\" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'\n .format(critic))\n super(UBDDPGAgent, self).__init__(**kwargs)\n if target_model_update < 0:\n raise ValueError('`target_model_update` must be >= 0.')\n elif target_model_update >= 1:\n target_model_update = int(target_model_update)\n else:\n target_model_update = float(target_model_update)\n if delta_range is not None:\n warnings.warn(\n \"`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we're falling back to `delta_range[1] = {}`\"\n .format(delta_range[1]))\n delta_clip = delta_range[1]\n self.nb_actions = nb_actions\n self.nb_steps_warmup_actor = nb_steps_warmup_actor\n self.nb_steps_warmup_critic = nb_steps_warmup_critic\n self.random_process = random_process\n self.delta_clip = delta_clip\n self.gamma = gamma\n self.target_model_update = target_model_update\n self.batch_size = batch_size\n self.train_interval = train_interval\n self.memory_interval = memory_interval\n self.custom_model_objects = custom_model_objects\n self.actor = actor\n self.critic = critic\n self.nb_players = nb_players\n self.critic_action_inputs = critic_action_inputs\n self.critic_action_input_idxes = [self.critic.input.index(\n critic_action_input) for critic_action_input in\n critic_action_inputs]\n self.memory = memory\n self.compiled = False\n self.reset_states()\n\n @property\n def uses_learning_phase(self):\n return (self.actor.uses_learning_phase or self.critic.\n uses_learning_phase)\n\n def compile(self, optimizer, metrics=[]):\n metrics += [mean_q]\n if type(optimizer) in (list, tuple):\n if len(optimizer) != 2:\n raise ValueError(\n 'More than two optimizers provided. 
Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.'\n )\n actor_optimizer, critic_optimizer = optimizer\n else:\n actor_optimizer = optimizer\n critic_optimizer = clone_optimizer(optimizer)\n if type(actor_optimizer) is str:\n actor_optimizer = optimizers.get(actor_optimizer)\n if type(critic_optimizer) is str:\n critic_optimizer = optimizers.get(critic_optimizer)\n assert actor_optimizer != critic_optimizer\n if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(\n metrics[1], '__len__'):\n actor_metrics, critic_metrics = metrics\n else:\n actor_metrics = critic_metrics = metrics\n\n def clipped_error(y_true, y_pred):\n y_true = K.squeeze(y_true, axis=-1)\n y_pred = K.squeeze(y_pred, axis=-1)\n loss = K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)\n return loss\n self.target_actor = clone_model(self.actor, self.custom_model_objects)\n self.target_actor.compile(optimizer='sgd', loss='mse')\n self.target_critic = clone_model(self.critic, self.custom_model_objects\n )\n self.target_critic.compile(optimizer='sgd', loss='mse')\n self.actor.compile(optimizer='sgd', loss='mse')\n if self.target_model_update < 1.0:\n critic_updates = get_soft_target_model_updates(self.\n target_critic, self.critic, self.target_model_update)\n critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer,\n critic_updates)\n self.critic.compile(optimizer=critic_optimizer, loss=[clipped_error\n ] * self.nb_players, metrics=critic_metrics)\n critic_inputs = []\n actor_inputs = []\n for i in self.critic.input:\n if i in self.critic_action_inputs:\n critic_inputs.append([])\n else:\n critic_inputs.append(i)\n actor_inputs.append(i)\n actor_outputs = self.actor(actor_inputs)\n if not isinstance(actor_outputs, (list,)):\n actor_outputs = [actor_outputs]\n assert len(actor_outputs) == self.nb_players\n for input_idx, actor_output in zip(self.critic_action_input_idxes,\n actor_outputs):\n critic_inputs[input_idx] = actor_output\n critic_outputs = self.critic(critic_inputs)\n if not isinstance(critic_outputs, (list,)):\n critic_outputs = [critic_outputs]\n assert len(critic_outputs) == self.nb_players\n actor_losses = [None] * self.nb_players\n for input_idx, critic_output in zip(self.critic_action_input_idxes,\n critic_outputs):\n actor_losses[input_idx] = -K.mean(critic_output)\n updates = actor_optimizer.get_updates(params=self.actor.\n trainable_weights, loss=actor_losses)\n if self.target_model_update < 1.0:\n updates += get_soft_target_model_updates(self.target_actor,\n self.actor, self.target_model_update)\n updates += self.actor.updates\n if K.backend() == 'tensorflow':\n self.actor_train_fn = K.function(actor_inputs + [K.\n learning_phase()], actor_outputs, updates=updates)\n else:\n if self.uses_learning_phase:\n actor_inputs += [K.learning_phase()]\n self.actor_train_fn = K.function(actor_inputs, actor_outputs,\n updates=updates)\n self.actor_optimizer = actor_optimizer\n self.compiled = True\n\n def load_weights(self, filepath):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n self.actor.load_weights(actor_filepath)\n self.critic.load_weights(critic_filepath)\n self.update_target_models_hard()\n\n def save_weights(self, filepath, overwrite=False):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n 
self.actor.save_weights(actor_filepath, overwrite=overwrite)\n self.critic.save_weights(critic_filepath, overwrite=overwrite)\n\n def update_target_models_hard(self):\n self.target_critic.set_weights(self.critic.get_weights())\n self.target_actor.set_weights(self.actor.get_weights())\n\n def reset_states(self):\n if self.random_process is not None:\n self.random_process.reset_states()\n self.recent_action = None\n self.recent_observation = None\n if self.compiled:\n self.actor.reset_states()\n self.critic.reset_states()\n self.target_actor.reset_states()\n self.target_critic.reset_states()\n\n def process_state_batch(self, batch):\n batch = np.array(batch)\n if self.processor is None:\n return batch\n return self.processor.process_state_batch(batch)\n\n def select_action(self, state):\n batch = self.process_state_batch([state])\n actions = self.actor.predict_on_batch(batch)\n if self.nb_players == 1:\n actions = [actions]\n assert len(actions) == self.nb_players\n assert actions[0].shape == (1, self.nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state_batch_with_action = batch[:]\n else:\n state_batch_with_action = [batch]\n for action_idx, input_idx in enumerate(self.critic_action_input_idxes):\n state_batch_with_action.insert(input_idx, actions[action_idx])\n q_values = [qv.flatten() for qv in self.critic.predict_on_batch(\n state_batch_with_action)]\n assert q_values[0].shape == (1,)\n assert len(q_values) == self.nb_players\n action_best = actions[np.argmax(q_values)].flatten()\n assert action_best.shape == (self.nb_actions,)\n if self.training and self.random_process is not None:\n noise = self.random_process.sample()\n assert noise.shape == action_best.shape\n action_best += noise\n return action_best\n\n def forward(self, observation):\n state = self.memory.get_recent_state(observation)\n action = self.select_action(state)\n self.recent_observation = observation\n self.recent_action = action\n return action\n\n @property\n def layers(self):\n return self.actor.layers[:] + self.critic.layers[:]\n\n @property\n def metrics_names(self):\n names = self.critic.metrics_names[:]\n if self.processor is not None:\n names += self.processor.metrics_names[:]\n return names\n\n def backward(self, reward, terminal=False):\n if self.step % self.memory_interval == 0:\n self.memory.append(self.recent_observation, self.recent_action,\n reward, terminal, training=self.training)\n metrics = [np.nan for _ in self.metrics_names]\n if not self.training:\n return metrics\n can_train_either = (self.step > self.nb_steps_warmup_critic or self\n .step > self.nb_steps_warmup_actor)\n if can_train_either and self.step % self.train_interval == 0:\n experiences = self.memory.sample(self.batch_size)\n assert len(experiences) == self.batch_size\n state0_batch = []\n reward_batch = []\n action_batch = []\n terminal1_batch = []\n state1_batch = []\n for e in experiences:\n state0_batch.append(e.state0)\n state1_batch.append(e.state1)\n reward_batch.append(e.reward)\n action_batch.append(e.action)\n terminal1_batch.append(0.0 if e.terminal1 else 1.0)\n state0_batch = self.process_state_batch(state0_batch)\n state1_batch = self.process_state_batch(state1_batch)\n terminal1_batch = np.array(terminal1_batch)\n reward_batch = np.array(reward_batch)\n action_batch = np.array(action_batch)\n assert reward_batch.shape == (self.batch_size,)\n assert terminal1_batch.shape == reward_batch.shape\n assert action_batch.shape == (self.batch_size, self.nb_actions)\n if self.step > self.nb_steps_warmup_critic:\n 
target_actions = self.target_actor.predict_on_batch(\n state1_batch)\n if not isinstance(target_actions, (list,)):\n target_actions = [target_actions]\n assert len(target_actions) == self.nb_players\n assert target_actions[0].shape == (self.batch_size, self.\n nb_actions)\n if len(self.critic.inputs) > self.nb_players + 1:\n state1_batch_with_action = state1_batch[:]\n else:\n state1_batch_with_action = [state1_batch]\n for action_idx, input_idx in enumerate(self.\n critic_action_input_idxes):\n state1_batch_with_action.insert(input_idx,\n target_actions[action_idx])\n target_q_values = self.target_critic.predict_on_batch(\n state1_batch_with_action)\n if not isinstance(target_q_values, (list,)):\n target_q_values = [target_q_values]\n target_q_values = [tqv.flatten() for tqv in target_q_values]\n assert target_q_values[0].shape == reward_batch.shape\n assert len(target_q_values) == self.nb_players\n discounted_reward_batch = [(self.gamma * terminal1_batch *\n tqv) for tqv in target_q_values]\n assert discounted_reward_batch[0].shape == reward_batch.shape\n targets = [(reward_batch + drb) for drb in\n discounted_reward_batch]\n assert targets[0].shape == reward_batch.shape\n assert len(targets) == self.nb_players\n if len(self.critic.inputs) > self.nb_players + 1:\n state0_batch_with_action = state0_batch[:]\n else:\n state0_batch_with_action = [state0_batch]\n for input_idx in self.critic_action_input_idxes:\n state0_batch_with_action.insert(input_idx, action_batch)\n metrics = self.critic.train_on_batch(state0_batch_with_action,\n targets)\n if self.processor is not None:\n metrics += self.processor.metrics\n if self.step > self.nb_steps_warmup_actor:\n if len(self.actor.inputs) >= 2:\n inputs = state0_batch[:]\n else:\n inputs = [state0_batch]\n if self.uses_learning_phase:\n inputs += [self.training]\n action_values = self.actor_train_fn(inputs)\n assert len(action_values) == self.nb_players\n assert action_values[0].shape == (self.batch_size, self.\n nb_actions)\n if (self.target_model_update >= 1 and self.step % self.\n target_model_update == 0):\n self.update_target_models_hard()\n return metrics\n",
"step-5": "from __future__ import division\nfrom collections import deque\nimport os\nimport warnings\n\nimport numpy as np\nimport keras.backend as K\nimport keras.layers as layers\nimport keras.optimizers as optimizers\n\nfrom rl.core import Agent\nfrom rl.util import *\n\n\ndef mean_q(y_true, y_pred):\n return K.mean(K.max(y_pred, axis=-1))\n\n\n# Deep DPG as described by Lillicrap et al. (2015)\n# http://arxiv.org/pdf/1509.02971v2.pdf\n# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.646.4324&rep=rep1&type=pdf\nclass UBDDPGAgent(Agent):\n \"\"\"Write me\n \"\"\"\n def __init__(self, nb_actions, actor, critic, nb_players, critic_action_inputs, memory,\n gamma=.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,\n train_interval=1, memory_interval=1, delta_range=None, delta_clip=np.inf,\n random_process=None, custom_model_objects={}, target_model_update=.001, **kwargs):\n assert len(critic_action_inputs) == nb_players\n if hasattr(actor.output, '__len__') and len(actor.output) != nb_players:\n raise ValueError((\n 'Actor \"{}\" does not have the right number of ',\n 'outputs. DDPG expects an actor that has {} outputs.'\n ).format(actor, nb_players))\n # if hasattr(critic.output, '__len__') and len(critic.output) > 1:\n # raise ValueError('Critic \"{}\" has more than one output. DDPG expects a critic that has a single output.'.format(critic))\n for critic_action_input in critic_action_inputs:\n if critic_action_input not in critic.input:\n raise ValueError('Critic \"{}\" does not have designated action input \"{}\".'.format(critic, critic_action_input))\n if not hasattr(critic.input, '__len__') or len(critic.input) < 2:\n raise ValueError('Critic \"{}\" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'.format(critic))\n\n super(UBDDPGAgent, self).__init__(**kwargs)\n\n # Soft vs hard target model updates.\n if target_model_update < 0:\n raise ValueError('`target_model_update` must be >= 0.')\n elif target_model_update >= 1:\n # Hard update every `target_model_update` steps.\n target_model_update = int(target_model_update)\n else:\n # Soft update with `(1 - target_model_update) * old + target_model_update * new`.\n target_model_update = float(target_model_update)\n\n if delta_range is not None:\n warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. 
For now we\\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))\n delta_clip = delta_range[1]\n\n # Parameters.\n self.nb_actions = nb_actions\n self.nb_steps_warmup_actor = nb_steps_warmup_actor\n self.nb_steps_warmup_critic = nb_steps_warmup_critic\n self.random_process = random_process\n self.delta_clip = delta_clip\n self.gamma = gamma\n self.target_model_update = target_model_update\n self.batch_size = batch_size\n self.train_interval = train_interval\n self.memory_interval = memory_interval\n self.custom_model_objects = custom_model_objects\n\n # Related objects.\n self.actor = actor\n self.critic = critic\n self.nb_players = nb_players\n self.critic_action_inputs = critic_action_inputs\n self.critic_action_input_idxes = [\n self.critic.input.index(critic_action_input)\n for critic_action_input in critic_action_inputs\n ]\n self.memory = memory\n\n # State.\n self.compiled = False\n self.reset_states()\n\n @property\n def uses_learning_phase(self):\n return self.actor.uses_learning_phase or self.critic.uses_learning_phase\n\n def compile(self, optimizer, metrics=[]):\n metrics += [mean_q]\n\n if type(optimizer) in (list, tuple):\n if len(optimizer) != 2:\n raise ValueError('More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.')\n actor_optimizer, critic_optimizer = optimizer\n else:\n actor_optimizer = optimizer\n critic_optimizer = clone_optimizer(optimizer)\n if type(actor_optimizer) is str:\n actor_optimizer = optimizers.get(actor_optimizer)\n if type(critic_optimizer) is str:\n critic_optimizer = optimizers.get(critic_optimizer)\n assert actor_optimizer != critic_optimizer\n\n if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(metrics[1], '__len__'):\n actor_metrics, critic_metrics = metrics\n else:\n actor_metrics = critic_metrics = metrics\n\n def clipped_error(y_true, y_pred):\n y_true = K.squeeze(y_true, axis=-1)\n y_pred = K.squeeze(y_pred, axis=-1)\n loss = K.mean(\n # K.random_uniform(shape=(self.batch_size, self.nb_players), minval=0., maxval=1.) *\n huber_loss(y_true, y_pred, self.delta_clip),\n axis=-1)\n # y_true = K.print_tensor(y_true, message='y_true: ')\n # y_pred = K.print_tensor(y_pred, message='y_pred: ')\n # loss = K.print_tensor(loss, message='loss: ')\n return loss\n\n # Compile target networks. We only use them in feed-forward mode, hence we can pass any\n # optimizer and loss since we never use it anyway.\n self.target_actor = clone_model(self.actor, self.custom_model_objects)\n self.target_actor.compile(optimizer='sgd', loss='mse')\n self.target_critic = clone_model(self.critic, self.custom_model_objects)\n self.target_critic.compile(optimizer='sgd', loss='mse')\n\n # We also compile the actor. We never optimize the actor using Keras but instead compute\n # the policy gradient ourselves. 
However, we need the actor in feed-forward mode, hence\n # we also compile it with any optimzer and\n self.actor.compile(optimizer='sgd', loss='mse')\n\n # Compile the critic.\n if self.target_model_update < 1.:\n # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.\n critic_updates = get_soft_target_model_updates(self.target_critic, self.critic, self.target_model_update)\n critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer, critic_updates)\n self.critic.compile(\n optimizer=critic_optimizer,\n loss=[clipped_error]*self.nb_players,\n metrics=critic_metrics)\n\n # Combine actor and critic so that we can get the policy gradient.\n # Assuming critic's state inputs are the same as actor's.\n critic_inputs = []\n actor_inputs = []\n for i in self.critic.input:\n if i in self.critic_action_inputs:\n critic_inputs.append([])\n else:\n critic_inputs.append(i)\n actor_inputs.append(i)\n actor_outputs = self.actor(actor_inputs)\n if not isinstance(actor_outputs, (list,)):\n actor_outputs = [actor_outputs]\n assert len(actor_outputs) == self.nb_players\n for input_idx, actor_output in zip(self.critic_action_input_idxes, actor_outputs):\n critic_inputs[input_idx] = actor_output\n\n # critic_outputs = layers.Maximum()(self.critic(critic_inputs))\n critic_outputs = self.critic(critic_inputs)\n if not isinstance(critic_outputs, (list,)):\n critic_outputs = [critic_outputs]\n assert len(critic_outputs) == self.nb_players\n\n actor_losses = [None]* self.nb_players\n for input_idx, critic_output in zip(self.critic_action_input_idxes, critic_outputs):\n actor_losses[input_idx] = -K.mean(critic_output)\n updates = actor_optimizer.get_updates(\n params=self.actor.trainable_weights,\n loss=actor_losses)\n if self.target_model_update < 1.:\n # Include soft target model updates.\n updates += get_soft_target_model_updates(self.target_actor, self.actor, self.target_model_update)\n updates += self.actor.updates # include other updates of the actor, e.g. 
for BN\n\n # Finally, combine it all into a callable function.\n if K.backend() == 'tensorflow':\n self.actor_train_fn = K.function(actor_inputs + [K.learning_phase()],\n actor_outputs, updates=updates)\n else:\n if self.uses_learning_phase:\n actor_inputs += [K.learning_phase()]\n self.actor_train_fn = K.function(actor_inputs, actor_outputs, updates=updates)\n self.actor_optimizer = actor_optimizer\n\n self.compiled = True\n\n def load_weights(self, filepath):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n self.actor.load_weights(actor_filepath)\n self.critic.load_weights(critic_filepath)\n self.update_target_models_hard()\n\n def save_weights(self, filepath, overwrite=False):\n filename, extension = os.path.splitext(filepath)\n actor_filepath = filename + '_actor' + extension\n critic_filepath = filename + '_critic' + extension\n self.actor.save_weights(actor_filepath, overwrite=overwrite)\n self.critic.save_weights(critic_filepath, overwrite=overwrite)\n\n def update_target_models_hard(self):\n self.target_critic.set_weights(self.critic.get_weights())\n self.target_actor.set_weights(self.actor.get_weights())\n\n # TODO: implement pickle\n\n def reset_states(self):\n if self.random_process is not None:\n self.random_process.reset_states()\n self.recent_action = None\n self.recent_observation = None\n if self.compiled:\n self.actor.reset_states()\n self.critic.reset_states()\n self.target_actor.reset_states()\n self.target_critic.reset_states()\n\n def process_state_batch(self, batch):\n batch = np.array(batch)\n if self.processor is None:\n return batch\n return self.processor.process_state_batch(batch)\n\n def select_action(self, state):\n batch = self.process_state_batch([state])\n # actions = [action.flatten() for action in self.actor.predict_on_batch(batch)]\n actions = self.actor.predict_on_batch(batch)\n if self.nb_players == 1:\n actions =[actions]\n # actions = [a.flatten() for a in actions]\n assert len(actions) == self.nb_players\n # assert actions[0].shape == (self.nb_actions,)\n assert actions[0].shape == (1, self.nb_actions)\n # print('actions: {}'.format(actions))\n\n if len(self.critic.inputs) > (self.nb_players+1): # state is a list\n state_batch_with_action = batch[:]\n else:\n state_batch_with_action = [batch]\n for action_idx, input_idx in enumerate(self.critic_action_input_idxes):\n state_batch_with_action.insert(input_idx, actions[action_idx])\n q_values = [\n qv.flatten() \n for qv in self.critic.predict_on_batch(state_batch_with_action)\n ]\n assert q_values[0].shape == (1, )\n assert len(q_values) == self.nb_players\n # print('q_values: {}'.format(q_values))\n\n action_best = actions[np.argmax(q_values)].flatten()\n # assert action_best.shape == (self.nb_actions, )\n assert action_best.shape == (self.nb_actions, )\n # print('action_best: {}'.format(action_best))\n # print(type(action_best[0]))\n\n # Apply noise, if a random process is set.\n if self.training and self.random_process is not None:\n noise = self.random_process.sample()\n assert noise.shape == action_best.shape\n action_best += noise\n\n return action_best\n\n def forward(self, observation):\n # Select an action.\n state = self.memory.get_recent_state(observation)\n action = self.select_action(state) # TODO: move this into policy\n\n # Book-keeping.\n self.recent_observation = observation\n self.recent_action = action\n\n return action\n\n @property\n def layers(self):\n return 
self.actor.layers[:] + self.critic.layers[:]\n\n @property\n def metrics_names(self):\n names = self.critic.metrics_names[:]\n if self.processor is not None:\n names += self.processor.metrics_names[:]\n return names\n\n def backward(self, reward, terminal=False):\n # Store most recent experience in memory.\n if self.step % self.memory_interval == 0:\n self.memory.append(self.recent_observation, self.recent_action, reward, terminal,\n training=self.training)\n\n metrics = [np.nan for _ in self.metrics_names]\n if not self.training:\n # We're done here. No need to update the experience memory since we only use the working\n # memory to obtain the state over the most recent observations.\n return metrics\n\n # Train the network on a single stochastic batch.\n can_train_either = self.step > self.nb_steps_warmup_critic or self.step > self.nb_steps_warmup_actor\n if can_train_either and self.step % self.train_interval == 0:\n experiences = self.memory.sample(self.batch_size)\n assert len(experiences) == self.batch_size\n\n # Start by extracting the necessary parameters (we use a vectorized implementation).\n state0_batch = []\n reward_batch = []\n action_batch = []\n terminal1_batch = []\n state1_batch = []\n for e in experiences:\n state0_batch.append(e.state0)\n state1_batch.append(e.state1)\n reward_batch.append(e.reward)\n action_batch.append(e.action)\n terminal1_batch.append(0. if e.terminal1 else 1.)\n\n # Prepare and validate parameters.\n state0_batch = self.process_state_batch(state0_batch)\n state1_batch = self.process_state_batch(state1_batch)\n terminal1_batch = np.array(terminal1_batch)\n reward_batch = np.array(reward_batch)\n action_batch = np.array(action_batch)\n assert reward_batch.shape == (self.batch_size,)\n assert terminal1_batch.shape == reward_batch.shape\n assert action_batch.shape == (self.batch_size, self.nb_actions)\n\n # Update critic, if warm up is over.\n if self.step > self.nb_steps_warmup_critic:\n target_actions = self.target_actor.predict_on_batch(state1_batch)\n if not isinstance(target_actions, (list,)):\n target_actions = [target_actions]\n assert len(target_actions) == self.nb_players\n assert target_actions[0].shape == (self.batch_size, self.nb_actions)\n if len(self.critic.inputs) > (self.nb_players+1): # state is a list\n # if len(self.critic.inputs) >= 3:\n state1_batch_with_action = state1_batch[:]\n else:\n state1_batch_with_action = [state1_batch]\n # state1_batch_with_action.insert(self.critic_action_input_idx, target_actions)\n for action_idx, input_idx in enumerate(self.critic_action_input_idxes):\n state1_batch_with_action.insert(input_idx, target_actions[action_idx])\n target_q_values = self.target_critic.predict_on_batch(state1_batch_with_action)\n if not isinstance(target_q_values, (list,)):\n target_q_values = [target_q_values]\n target_q_values = [ tqv.flatten() for tqv in target_q_values]\n assert target_q_values[0].shape == reward_batch.shape\n assert len(target_q_values) == self.nb_players\n\n # Compute r_t + gamma * Q(s_t+1, mu(s_t+1)) and update the target ys accordingly,\n # but only for the affected output units (as given by action_batch).\n discounted_reward_batch = [\n self.gamma * terminal1_batch * tqv\n for tqv in target_q_values\n ]\n assert discounted_reward_batch[0].shape == reward_batch.shape\n targets = [reward_batch + drb for drb in discounted_reward_batch] # .reshape(self.batch_size, 1)\n assert targets[0].shape == reward_batch.shape\n assert len(targets) == self.nb_players\n\n # Perform a single batch update on the critic 
network.\n # if len(self.critic.inputs) >= 3:\n if len(self.critic.inputs) > (self.nb_players+1): # state is a list\n state0_batch_with_action = state0_batch[:]\n else:\n state0_batch_with_action = [state0_batch]\n for input_idx in self.critic_action_input_idxes:\n state0_batch_with_action.insert(input_idx, action_batch)\n # state0_batch_with_action.insert(self.critic_action_input_idx, action_batch)\n metrics = self.critic.train_on_batch(\n state0_batch_with_action,\n targets)\n if self.processor is not None:\n metrics += self.processor.metrics\n\n # q_values = self.critic.predict_on_batch(state0_batch_with_action)\n # if not isinstance(q_values, (list,)):\n # q_values = [q_values]\n # q_values = [ qv.flatten() for qv in q_values]\n # print('gamma: {}'.format(self.gamma))\n # print('terminal1_batch: {}'.format(terminal1_batch))\n # print('target_q_values: {}'.format(target_q_values))\n # print('discounted_reward_batch: {}'.format(discounted_reward_batch))\n # print('reward_batch: {}'.format(reward_batch))\n # print('targets: {}'.format(targets))\n # print('current q values: {}'.format(q_values))\n\n\n # Update actor, if warm up is over.\n if self.step > self.nb_steps_warmup_actor:\n # TODO: implement metrics for actor\n if len(self.actor.inputs) >= 2:\n inputs = state0_batch[:]\n else:\n inputs = [state0_batch]\n if self.uses_learning_phase:\n inputs += [self.training]\n action_values = self.actor_train_fn(inputs)\n assert len(action_values) == self.nb_players\n assert action_values[0].shape == (self.batch_size, self.nb_actions)\n\n if self.target_model_update >= 1 and self.step % self.target_model_update == 0:\n self.update_target_models_hard()\n\n return metrics\n",
"step-ids": [
9,
12,
15,
17,
18
]
}
|
[
9,
12,
15,
17,
18
] |
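
For orientation between records, a minimal usage sketch for the UBDDPGAgent above. The two-player shapes, layer sizes, and the keras-rl helpers used here (SequentialMemory, OrnsteinUhlenbeckProcess) are illustrative assumptions, not part of the record.

from keras.layers import Input, Dense, Flatten, Concatenate
from keras.models import Model
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess

nb_actions, obs_dim, nb_players = 2, 4, 2   # placeholder sizes

# Actor: one observation input, one tanh action head per player.
obs_in = Input(shape=(1, obs_dim))          # window_length=1 adds a time axis
h = Dense(32, activation='relu')(Flatten()(obs_in))
actor = Model(obs_in, [Dense(nb_actions, activation='tanh')(h) for _ in range(nb_players)])

# Critic: shared observation input plus one designated action input per player.
c_obs = Input(shape=(1, obs_dim))
flat = Flatten()(c_obs)
action_inputs = [Input(shape=(nb_actions,)) for _ in range(nb_players)]
q_heads = [Dense(1)(Dense(32, activation='relu')(Concatenate()([flat, a])))
           for a in action_inputs]
critic = Model([c_obs] + action_inputs, q_heads)

agent = UBDDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic,
                    nb_players=nb_players, critic_action_inputs=action_inputs,
                    memory=SequentialMemory(limit=100000, window_length=1),
                    random_process=OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3))
agent.compile('adam', metrics=['mae'])
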
import torch
import torch.nn as nn
from model.common import UpsampleBlock, conv_, SELayer
def wrapper(args):
act = None
if args.act == 'relu':
act = nn.ReLU(True)
elif args.act == 'leak_relu':
act = nn.LeakyReLU(0.2, True)
elif args.act is None:
act = None
else:
raise NotImplementedError
return AFN(in_c=args.n_colors, out_c=args.n_colors, scale=args.scale, n_feats=args.n_feats, act=act)
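# AFB_0: basic residual unit -- n_blocks conv/activation pairs plus an identity skip.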
class AFB_0(nn.Module):
def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):
super(AFB_0, self).__init__()
self.op = []
for _ in range(n_blocks):
self.op.append(conv_(channels, channels))
self.op.append(act)
self.op = nn.Sequential(*self.op)
def forward(self, x):
x = x + self.op(x)
return x
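# AFB_L1/L2/L3 share one pattern: run n sub-blocks in sequence, concatenate every
# intermediate output, fuse back to `channels` with an SE-gated 1x1 conv (local
# feature fusion), then close with a long residual connection.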
class AFB_L1(nn.Module):
def __init__(self, channels, n_l0=3, act=nn.ReLU(True)):
super(AFB_L1, self).__init__()
self.n = n_l0
self.convs_ = nn.ModuleList()
for _ in range(n_l0):
self.convs_.append(
AFB_0(channels, 2, act)
)
self.LFF = nn.Sequential(
SELayer(channels * n_l0, 16),
nn.Conv2d(channels * n_l0, channels, 1, padding=0, stride=1),
)
def forward(self, x):
res = []
ox = x
for i in range(self.n):
x = self.convs_[i](x)
res.append(x)
res = self.LFF(torch.cat(res, 1))
x = res + ox
return x
class AFB_L2(nn.Module):
def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):
super(AFB_L2, self).__init__()
self.n = n_l1
self.convs_ = nn.ModuleList()
for _ in range(n_l1):
self.convs_.append(
AFB_L1(channels, 3, act)
)
self.LFF = nn.Sequential(
SELayer(channels * n_l1, 16),
nn.Conv2d(channels * n_l1, channels, 1, padding=0, stride=1),
)
def forward(self, x):
res = []
ox = x
for i in range(self.n):
x = self.convs_[i](x)
res.append(x)
res = self.LFF(torch.cat(res, 1))
x = res + ox
return x
class AFB_L3(nn.Module):
def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):
super(AFB_L3, self).__init__()
self.n = n_l2
self.convs_ = nn.ModuleList()
for _ in range(n_l2):
self.convs_.append(
AFB_L2(channels, 4, act)
)
self.LFF = nn.Sequential(
SELayer(channels * n_l2, 16),
nn.Conv2d(channels * n_l2, channels, 1, padding=0, stride=1),
)
def forward(self, x):
res = []
ox = x
for i in range(self.n):
x = self.convs_[i](x)
res.append(x)
res = self.LFF(torch.cat(res, 1))
x = res + ox
return x
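# AFN: shallow feature head, n_l3 stacked AFB_L3 stages fused globally (GFF),
# a residual add, then an upsampling tail back to image space.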
class AFN(nn.Module):
def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=nn.LeakyReLU(0.2, True)):
super(AFN, self).__init__()
self.head = conv_(in_c, n_feats)
self.n = n_l3
self.AFBs = nn.ModuleList()
for i in range(n_l3):
self.AFBs.append(
AFB_L3(channels=n_feats, n_l2=4, act=act)
)
self.GFF = nn.Sequential(*[
SELayer(n_feats * n_l3),
conv_(n_feats * n_l3, n_feats, 1, padding=0, stride=1),
])
self.tail = nn.Sequential(*[
UpsampleBlock(scale, n_feats, kernel_size=3, stride=1, bias=True, act=act),
conv_(n_feats, out_c)
])
def forward(self, x):
res = []
x = self.head(x)
for i in range(self.n):
x = self.AFBs[i](x)
res.append(x)
res = self.GFF(torch.cat(res, 1))
x = res + x
x = self.tail(x)
return x
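# Smoke test: summarize the network and profile a single forward pass on random input.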
if __name__ == "__main__":
import numpy as np
import torch
import torchsummary
model = AFN(in_c=3, out_c=3, scale=8, n_feats=128, n_l3=3, act=nn.LeakyReLU(0.2, True))
print(torchsummary.summary(model, (3, 24, 24), device='cpu'))
x = np.random.uniform(0, 1, [2, 3, 24, 24]).astype(np.float32)
x = torch.tensor(x)
# loss = nn.L1Loss()
# Adam = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.99, 0.999))
with torch.autograd.profiler.profile(use_cuda=True) as prof:
y = model(x)
print(prof)
print(y.shape)
|
normal
|
{
"blob_id": "b2c0ef4a0af12b267a54a7ae3fed9edeab2fb879",
"index": 6570,
"step-1": "<mask token>\n\n\nclass AFB_L1(nn.Module):\n <mask token>\n <mask token>\n\n\nclass AFB_L2(nn.Module):\n\n def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):\n super(AFB_L2, self).__init__()\n self.n = n_l1\n self.convs_ = nn.ModuleList()\n for _ in range(n_l1):\n self.convs_.append(AFB_L1(channels, 3, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l1, 16), nn.Conv2d(\n channels * n_l1, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L3(nn.Module):\n\n def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):\n super(AFB_L3, self).__init__()\n self.n = n_l2\n self.convs_ = nn.ModuleList()\n for _ in range(n_l2):\n self.convs_.append(AFB_L2(channels, 4, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l2, 16), nn.Conv2d(\n channels * n_l2, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFN(nn.Module):\n\n def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=\n nn.LeakyReLU(0.2, True)):\n super(AFN, self).__init__()\n self.head = conv_(in_c, n_feats)\n self.n = n_l3\n self.AFBs = nn.ModuleList()\n for i in range(n_l3):\n self.AFBs.append(AFB_L3(channels=n_feats, n_l2=4, act=act))\n self.GFF = nn.Sequential(*[SELayer(n_feats * n_l3), conv_(n_feats *\n n_l3, n_feats, 1, padding=0, stride=1)])\n self.tail = nn.Sequential(*[UpsampleBlock(scale, n_feats,\n kernel_size=3, stride=1, bias=True, act=act), conv_(n_feats,\n out_c)])\n\n def forward(self, x):\n res = []\n x = self.head(x)\n for i in range(self.n):\n x = self.AFBs[i](x)\n res.append(x)\n res = self.GFF(torch.cat(res, 1))\n x = res + x\n x = self.tail(x)\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AFB_0(nn.Module):\n\n def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):\n super(AFB_0, self).__init__()\n self.op = []\n for _ in range(n_blocks):\n self.op.append(conv_(channels, channels))\n self.op.append(act)\n self.op = nn.Sequential(*self.op)\n <mask token>\n\n\nclass AFB_L1(nn.Module):\n\n def __init__(self, channels, n_l0=3, act=nn.ReLU(True)):\n super(AFB_L1, self).__init__()\n self.n = n_l0\n self.convs_ = nn.ModuleList()\n for _ in range(n_l0):\n self.convs_.append(AFB_0(channels, 2, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l0, 16), nn.Conv2d(\n channels * n_l0, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L2(nn.Module):\n\n def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):\n super(AFB_L2, self).__init__()\n self.n = n_l1\n self.convs_ = nn.ModuleList()\n for _ in range(n_l1):\n self.convs_.append(AFB_L1(channels, 3, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l1, 16), nn.Conv2d(\n channels * n_l1, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L3(nn.Module):\n\n def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):\n super(AFB_L3, self).__init__()\n self.n = n_l2\n self.convs_ = nn.ModuleList()\n for _ in range(n_l2):\n self.convs_.append(AFB_L2(channels, 4, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l2, 16), nn.Conv2d(\n channels * n_l2, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFN(nn.Module):\n\n def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=\n nn.LeakyReLU(0.2, True)):\n super(AFN, self).__init__()\n self.head = conv_(in_c, n_feats)\n self.n = n_l3\n self.AFBs = nn.ModuleList()\n for i in range(n_l3):\n self.AFBs.append(AFB_L3(channels=n_feats, n_l2=4, act=act))\n self.GFF = nn.Sequential(*[SELayer(n_feats * n_l3), conv_(n_feats *\n n_l3, n_feats, 1, padding=0, stride=1)])\n self.tail = nn.Sequential(*[UpsampleBlock(scale, n_feats,\n kernel_size=3, stride=1, bias=True, act=act), conv_(n_feats,\n out_c)])\n\n def forward(self, x):\n res = []\n x = self.head(x)\n for i in range(self.n):\n x = self.AFBs[i](x)\n res.append(x)\n res = self.GFF(torch.cat(res, 1))\n x = res + x\n x = self.tail(x)\n return x\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef wrapper(args):\n act = None\n if args.act == 'relu':\n act = nn.ReLU(True)\n elif args.act == 'leak_relu':\n act = nn.LeakyReLU(0.2, True)\n elif args.act is None:\n act = None\n else:\n raise NotImplementedError\n return AFN(in_c=args.n_colors, out_c=args.n_colors, scale=args.scale,\n n_feats=args.n_feats, act=act)\n\n\nclass AFB_0(nn.Module):\n\n def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):\n super(AFB_0, self).__init__()\n self.op = []\n for _ in range(n_blocks):\n self.op.append(conv_(channels, channels))\n self.op.append(act)\n self.op = nn.Sequential(*self.op)\n\n def forward(self, x):\n x = x + self.op(x)\n return x\n\n\nclass AFB_L1(nn.Module):\n\n def __init__(self, channels, n_l0=3, act=nn.ReLU(True)):\n super(AFB_L1, self).__init__()\n self.n = n_l0\n self.convs_ = nn.ModuleList()\n for _ in range(n_l0):\n self.convs_.append(AFB_0(channels, 2, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l0, 16), nn.Conv2d(\n channels * n_l0, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L2(nn.Module):\n\n def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):\n super(AFB_L2, self).__init__()\n self.n = n_l1\n self.convs_ = nn.ModuleList()\n for _ in range(n_l1):\n self.convs_.append(AFB_L1(channels, 3, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l1, 16), nn.Conv2d(\n channels * n_l1, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L3(nn.Module):\n\n def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):\n super(AFB_L3, self).__init__()\n self.n = n_l2\n self.convs_ = nn.ModuleList()\n for _ in range(n_l2):\n self.convs_.append(AFB_L2(channels, 4, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l2, 16), nn.Conv2d(\n channels * n_l2, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFN(nn.Module):\n\n def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=\n nn.LeakyReLU(0.2, True)):\n super(AFN, self).__init__()\n self.head = conv_(in_c, n_feats)\n self.n = n_l3\n self.AFBs = nn.ModuleList()\n for i in range(n_l3):\n self.AFBs.append(AFB_L3(channels=n_feats, n_l2=4, act=act))\n self.GFF = nn.Sequential(*[SELayer(n_feats * n_l3), conv_(n_feats *\n n_l3, n_feats, 1, padding=0, stride=1)])\n self.tail = nn.Sequential(*[UpsampleBlock(scale, n_feats,\n kernel_size=3, stride=1, bias=True, act=act), conv_(n_feats,\n out_c)])\n\n def forward(self, x):\n res = []\n x = self.head(x)\n for i in range(self.n):\n x = self.AFBs[i](x)\n res.append(x)\n res = self.GFF(torch.cat(res, 1))\n x = res + x\n x = self.tail(x)\n return x\n\n\nif __name__ == '__main__':\n import numpy as np\n import torch\n import torchsummary\n model = AFN(in_c=3, out_c=3, scale=8, n_feats=128, n_l3=3, act=nn.\n LeakyReLU(0.2, True))\n print(torchsummary.summary(model, (3, 24, 24), device='cpu'))\n x = np.random.uniform(0, 1, [2, 3, 24, 24]).astype(np.float32)\n x = torch.tensor(x)\n with torch.autograd.profiler.profile(use_cuda=True) as prof:\n y = model(x)\n print(prof)\n print(y.shape)\n",
"step-4": "import torch\nimport torch.nn as nn\nfrom model.common import UpsampleBlock, conv_, SELayer\n\n\ndef wrapper(args):\n act = None\n if args.act == 'relu':\n act = nn.ReLU(True)\n elif args.act == 'leak_relu':\n act = nn.LeakyReLU(0.2, True)\n elif args.act is None:\n act = None\n else:\n raise NotImplementedError\n return AFN(in_c=args.n_colors, out_c=args.n_colors, scale=args.scale,\n n_feats=args.n_feats, act=act)\n\n\nclass AFB_0(nn.Module):\n\n def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):\n super(AFB_0, self).__init__()\n self.op = []\n for _ in range(n_blocks):\n self.op.append(conv_(channels, channels))\n self.op.append(act)\n self.op = nn.Sequential(*self.op)\n\n def forward(self, x):\n x = x + self.op(x)\n return x\n\n\nclass AFB_L1(nn.Module):\n\n def __init__(self, channels, n_l0=3, act=nn.ReLU(True)):\n super(AFB_L1, self).__init__()\n self.n = n_l0\n self.convs_ = nn.ModuleList()\n for _ in range(n_l0):\n self.convs_.append(AFB_0(channels, 2, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l0, 16), nn.Conv2d(\n channels * n_l0, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L2(nn.Module):\n\n def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):\n super(AFB_L2, self).__init__()\n self.n = n_l1\n self.convs_ = nn.ModuleList()\n for _ in range(n_l1):\n self.convs_.append(AFB_L1(channels, 3, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l1, 16), nn.Conv2d(\n channels * n_l1, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L3(nn.Module):\n\n def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):\n super(AFB_L3, self).__init__()\n self.n = n_l2\n self.convs_ = nn.ModuleList()\n for _ in range(n_l2):\n self.convs_.append(AFB_L2(channels, 4, act))\n self.LFF = nn.Sequential(SELayer(channels * n_l2, 16), nn.Conv2d(\n channels * n_l2, channels, 1, padding=0, stride=1))\n\n def forward(self, x):\n res = []\n ox = x\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFN(nn.Module):\n\n def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=\n nn.LeakyReLU(0.2, True)):\n super(AFN, self).__init__()\n self.head = conv_(in_c, n_feats)\n self.n = n_l3\n self.AFBs = nn.ModuleList()\n for i in range(n_l3):\n self.AFBs.append(AFB_L3(channels=n_feats, n_l2=4, act=act))\n self.GFF = nn.Sequential(*[SELayer(n_feats * n_l3), conv_(n_feats *\n n_l3, n_feats, 1, padding=0, stride=1)])\n self.tail = nn.Sequential(*[UpsampleBlock(scale, n_feats,\n kernel_size=3, stride=1, bias=True, act=act), conv_(n_feats,\n out_c)])\n\n def forward(self, x):\n res = []\n x = self.head(x)\n for i in range(self.n):\n x = self.AFBs[i](x)\n res.append(x)\n res = self.GFF(torch.cat(res, 1))\n x = res + x\n x = self.tail(x)\n return x\n\n\nif __name__ == '__main__':\n import numpy as np\n import torch\n import torchsummary\n model = AFN(in_c=3, out_c=3, scale=8, n_feats=128, n_l3=3, act=nn.\n LeakyReLU(0.2, True))\n print(torchsummary.summary(model, (3, 24, 24), device='cpu'))\n x = np.random.uniform(0, 1, [2, 3, 24, 24]).astype(np.float32)\n x = torch.tensor(x)\n with torch.autograd.profiler.profile(use_cuda=True) as prof:\n y = 
model(x)\n print(prof)\n print(y.shape)\n",
"step-5": "import torch\nimport torch.nn as nn\nfrom model.common import UpsampleBlock, conv_, SELayer\n\ndef wrapper(args):\n act = None\n if args.act == 'relu':\n act = nn.ReLU(True)\n elif args.act == 'leak_relu':\n act = nn.LeakyReLU(0.2, True)\n elif args.act is None:\n act = None\n else:\n raise NotImplementedError\n\n return AFN(in_c=args.n_colors, out_c=args.n_colors, scale=args.scale, n_feats=args.n_feats, act=act)\n\nclass AFB_0(nn.Module):\n def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):\n super(AFB_0, self).__init__()\n self.op = []\n for _ in range(n_blocks):\n self.op.append(conv_(channels, channels))\n self.op.append(act)\n\n self.op = nn.Sequential(*self.op)\n\n def forward(self, x):\n x = x + self.op(x)\n return x\n\n\nclass AFB_L1(nn.Module):\n def __init__(self, channels, n_l0=3, act=nn.ReLU(True)):\n super(AFB_L1, self).__init__()\n\n self.n = n_l0\n self.convs_ = nn.ModuleList()\n for _ in range(n_l0):\n self.convs_.append(\n AFB_0(channels, 2, act)\n )\n\n self.LFF = nn.Sequential(\n SELayer(channels * n_l0, 16),\n nn.Conv2d(channels * n_l0, channels, 1, padding=0, stride=1),\n )\n\n def forward(self, x):\n res = []\n ox = x\n\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L2(nn.Module):\n def __init__(self, channels, n_l1=4, act=nn.ReLU(True)):\n super(AFB_L2, self).__init__()\n\n self.n = n_l1\n self.convs_ = nn.ModuleList()\n for _ in range(n_l1):\n self.convs_.append(\n AFB_L1(channels, 3, act)\n )\n\n self.LFF = nn.Sequential(\n SELayer(channels * n_l1, 16),\n nn.Conv2d(channels * n_l1, channels, 1, padding=0, stride=1),\n )\n\n def forward(self, x):\n res = []\n ox = x\n\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFB_L3(nn.Module):\n def __init__(self, channels, n_l2=4, act=nn.ReLU(True)):\n super(AFB_L3, self).__init__()\n\n self.n = n_l2\n self.convs_ = nn.ModuleList()\n for _ in range(n_l2):\n self.convs_.append(\n AFB_L2(channels, 4, act)\n )\n\n self.LFF = nn.Sequential(\n SELayer(channels * n_l2, 16),\n nn.Conv2d(channels * n_l2, channels, 1, padding=0, stride=1),\n )\n\n def forward(self, x):\n res = []\n ox = x\n\n for i in range(self.n):\n x = self.convs_[i](x)\n res.append(x)\n res = self.LFF(torch.cat(res, 1))\n x = res + ox\n return x\n\n\nclass AFN(nn.Module):\n def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=nn.LeakyReLU(0.2, True)):\n super(AFN, self).__init__()\n\n self.head = conv_(in_c, n_feats)\n\n self.n = n_l3\n self.AFBs = nn.ModuleList()\n for i in range(n_l3):\n self.AFBs.append(\n AFB_L3(channels=n_feats, n_l2=4, act=act)\n )\n\n self.GFF = nn.Sequential(*[\n SELayer(n_feats * n_l3),\n conv_(n_feats * n_l3, n_feats, 1, padding=0, stride=1),\n ])\n\n self.tail = nn.Sequential(*[\n UpsampleBlock(scale, n_feats, kernel_size=3, stride=1, bias=True, act=act),\n conv_(n_feats, out_c)\n ])\n\n def forward(self, x):\n res = []\n x = self.head(x)\n\n for i in range(self.n):\n x = self.AFBs[i](x)\n res.append(x)\n\n res = self.GFF(torch.cat(res, 1))\n x = res + x\n\n x = self.tail(x)\n return x\n\nif __name__ == \"__main__\":\n import numpy as np\n import torch\n import torchsummary\n\n model = AFN(in_c=3, out_c=3, scale=8, n_feats=128, n_l3=3, act=nn.LeakyReLU(0.2, True))\n print(torchsummary.summary(model, (3, 24, 24), device='cpu'))\n\n x = np.random.uniform(0, 1, [2, 3, 24, 24]).astype(np.float32)\n x = 
torch.tensor(x)\n\n # loss = nn.L1Loss()\n # Adam = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.99, 0.999))\n with torch.autograd.profiler.profile(use_cuda=True) as prof:\n y = model(x)\n print(prof)\n print(y.shape)\n",
"step-ids": [
10,
14,
17,
18,
19
]
}
|
[
10,
14,
17,
18,
19
] |
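
The commented-out loss/Adam lines in the record's __main__ block point at an L1/Adam setup; a minimal training-step sketch under that assumption (random tensors stand in for a real super-resolution batch, and model.common is assumed importable):

import torch
import torch.nn as nn

model = AFN(in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3)
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.99, 0.999))

lr_batch = torch.rand(2, 3, 24, 24)   # low-resolution input
hr_batch = torch.rand(2, 3, 96, 96)   # 4x high-resolution target

optimizer.zero_grad()
loss = criterion(model(lr_batch), hr_batch)
loss.backward()
optimizer.step()
print(loss.item())
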
<|reserved_special_token_0|>
def draw_detections(img, rects, thickness=1):
for x, y, w, h in rects:
pad_w, pad_h = int(0.15 * w), int(0.05 * h)
cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -
pad_h), (0, 255, 0), thickness)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inside(r, q):
rx, ry, rw, rh = r
qx, qy, qw, qh = q
return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
def draw_detections(img, rects, thickness=1):
for x, y, w, h in rects:
pad_w, pad_h = int(0.15 * w), int(0.05 * h)
cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -
pad_h), (0, 255, 0), thickness)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def inside(r, q):
rx, ry, rw, rh = r
qx, qy, qw, qh = q
return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
def draw_detections(img, rects, thickness=1):
for x, y, w, h in rects:
pad_w, pad_h = int(0.15 * w), int(0.05 * h)
cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -
pad_h), (0, 255, 0), thickness)
if __name__ == '__main__':
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if not ret:
break
found, w = hog.detectMultiScale(frame, **hogParams)
found_filtered = []
for ri, r in enumerate(found):
for qi, q in enumerate(found):
if ri != qi and inside(r, q):
break
else:
found_filtered.append(r)
draw_detections(frame, found_filtered, 3)
print('%d (%d) found' % (len(found_filtered), len(found)))
key = cv2.waitKey(10)
if key == 27:
cv2.destroyAllWindows()
break
cv2.imshow('img', frame)
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
def inside(r, q):
rx, ry, rw, rh = r
qx, qy, qw, qh = q
return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
def draw_detections(img, rects, thickness=1):
for x, y, w, h in rects:
pad_w, pad_h = int(0.15 * w), int(0.05 * h)
cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -
pad_h), (0, 255, 0), thickness)
if __name__ == '__main__':
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if not ret:
break
found, w = hog.detectMultiScale(frame, **hogParams)
found_filtered = []
for ri, r in enumerate(found):
for qi, q in enumerate(found):
if ri != qi and inside(r, q):
break
else:
found_filtered.append(r)
draw_detections(frame, found_filtered, 3)
print('%d (%d) found' % (len(found_filtered), len(found)))
key = cv2.waitKey(10)
if key == 27:
cv2.destroyAllWindows()
break
cv2.imshow('img', frame)
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
#from skimage import measure
#from svmutil import *
import cv2
import numpy as np
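# True if rectangle r = (x, y, w, h) lies strictly inside rectangle q.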
def inside(r, q):
rx, ry, rw, rh = r
qx, qy, qw, qh = q
return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh
def draw_detections(img, rects, thickness = 1):
for x, y, w, h in rects:
# the HOG detector returns slightly larger rectangles than the real objects.
# so we slightly shrink the rectangles to get a nicer output.
pad_w, pad_h = int(0.15*w), int(0.05*h)
cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)
if __name__ == '__main__':
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}
cap = cv2.VideoCapture(0)
	while True:
ret, frame = cap.read()
if not ret:
break
found, w = hog.detectMultiScale(frame, **hogParams)
found_filtered = []
for ri, r in enumerate(found):
for qi, q in enumerate(found):
if ri != qi and inside(r, q):
break
			else:
				found_filtered.append(r)
#draw_detections(frame, found)
draw_detections(frame, found_filtered, 3)
print('%d (%d) found' % (len(found_filtered), len(found)))
key = cv2.waitKey(10)
if key == 27:
cv2.destroyAllWindows()
break
cv2.imshow('img', frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cap.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "f012f862ad064fc168bd5328b97c433164a3a36f",
"index": 3742,
"step-1": "<mask token>\n\n\ndef draw_detections(img, rects, thickness=1):\n for x, y, w, h in rects:\n pad_w, pad_h = int(0.15 * w), int(0.05 * h)\n cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -\n pad_h), (0, 255, 0), thickness)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef inside(r, q):\n rx, ry, rw, rh = r\n qx, qy, qw, qh = q\n return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh\n\n\ndef draw_detections(img, rects, thickness=1):\n for x, y, w, h in rects:\n pad_w, pad_h = int(0.15 * w), int(0.05 * h)\n cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -\n pad_h), (0, 255, 0), thickness)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef inside(r, q):\n rx, ry, rw, rh = r\n qx, qy, qw, qh = q\n return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh\n\n\ndef draw_detections(img, rects, thickness=1):\n for x, y, w, h in rects:\n pad_w, pad_h = int(0.15 * w), int(0.05 * h)\n cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -\n pad_h), (0, 255, 0), thickness)\n\n\nif __name__ == '__main__':\n hog = cv2.HOGDescriptor()\n hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n found, w = hog.detectMultiScale(frame, **hogParams)\n found_filtered = []\n for ri, r in enumerate(found):\n for qi, q in enumerate(found):\n if ri != qi and inside(r, q):\n break\n else:\n found_filtered.append(r)\n draw_detections(frame, found_filtered, 3)\n print('%d (%d) found' % (len(found_filtered), len(found)))\n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyAllWindows()\n break\n cv2.imshow('img', frame)\n cap.release()\n cv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\n\n\ndef inside(r, q):\n rx, ry, rw, rh = r\n qx, qy, qw, qh = q\n return rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh\n\n\ndef draw_detections(img, rects, thickness=1):\n for x, y, w, h in rects:\n pad_w, pad_h = int(0.15 * w), int(0.05 * h)\n cv2.rectangle(img, (x + pad_w, y + pad_h), (x + w - pad_w, y + h -\n pad_h), (0, 255, 0), thickness)\n\n\nif __name__ == '__main__':\n hog = cv2.HOGDescriptor()\n hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n hogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n found, w = hog.detectMultiScale(frame, **hogParams)\n found_filtered = []\n for ri, r in enumerate(found):\n for qi, q in enumerate(found):\n if ri != qi and inside(r, q):\n break\n else:\n found_filtered.append(r)\n draw_detections(frame, found_filtered, 3)\n print('%d (%d) found' % (len(found_filtered), len(found)))\n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyAllWindows()\n break\n cv2.imshow('img', frame)\n cap.release()\n cv2.destroyAllWindows()\n",
"step-5": "#from skimage import measure\n#from svmutil import *\nimport cv2\nimport numpy as np \n\ndef inside(r, q):\n\trx, ry, rw, rh = r\n\tqx, qy, qw, qh = q\n\treturn rx > qx and ry > qy and rx + rw < qx + qw and ry + rh < qy + qh\n\ndef draw_detections(img, rects, thickness = 1):\n\tfor x, y, w, h in rects:\n # the HOG detector returns slightly larger rectangles than the real objects.\n # so we slightly shrink the rectangles to get a nicer output.\n\t\tpad_w, pad_h = int(0.15*w), int(0.05*h)\n\t\tcv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)\n\nif __name__ == '__main__': \n\thog = cv2.HOGDescriptor()\n\thog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n\thogParams = {'winStride': (8, 8), 'padding': (32, 32), 'scale': 1.05}\n\n\tcap = cv2.VideoCapture(0)\n\n\twhile(True):\n\n\t\tret, frame = cap.read()\n\t\tif not ret:\n\t\t\tbreak\n\n\t\tfound, w = hog.detectMultiScale(frame, **hogParams)\n\t\tfound_filtered = []\n\t\tfor ri, r in enumerate(found):\n\t\t\tfor qi, q in enumerate(found):\n\t\t\t\tif ri != qi and inside(r, q):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tfound_filtered.append(r)\n\n\t\t#draw_detections(frame, found)\n\t\tdraw_detections(frame, found_filtered, 3)\n\t\tprint('%d (%d) found' % (len(found_filtered), len(found)))\n\t\tkey = cv2.waitKey(10)\n\t\tif key == 27:\n\t\t\tcv2.destroyAllWindows()\n\t\t\tbreak\n\n\t\tcv2.imshow('img', frame)\n#\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n#\t\t\tbreak\n\t\n\tcap.release()\n\tcv2.destroyAllWindows()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readme():
with open('README.rst') as f:
return f.read()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readme():
with open('README.rst') as f:
return f.read()
setup(name='keputils', version='0.2.1', description=
'Basic module for interaction with KOI and Kepler-stellar tables.',
long_description=readme(), author='Timothy D. Morton', author_email=
'tim.morton@gmail.com', url=
'https://github.com/timothydmorton/keputils', packages=['keputils'],
scripts=['scripts/koiquery'], classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[
'pandas>=0.13', 'simpledist'], zip_safe=False)
<|reserved_special_token_1|>
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='keputils', version='0.2.1', description=
'Basic module for interaction with KOI and Kepler-stellar tables.',
long_description=readme(), author='Timothy D. Morton', author_email=
'tim.morton@gmail.com', url=
'https://github.com/timothydmorton/keputils', packages=['keputils'],
scripts=['scripts/koiquery'], classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent', 'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[
'pandas>=0.13', 'simpledist'], zip_safe=False)
<|reserved_special_token_1|>
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name = "keputils",
version = "0.2.1",
description = "Basic module for interaction with KOI and Kepler-stellar tables.",
long_description = readme(),
author = "Timothy D. Morton",
author_email = "tim.morton@gmail.com",
url = "https://github.com/timothydmorton/keputils",
packages = ['keputils'],
scripts = ['scripts/koiquery'],
#entry_points = {'console_scripts' : ['koiquery = koiquery:main']},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['pandas>=0.13','simpledist'],
zip_safe=False
)
|
flexible
|
{
"blob_id": "6da828a797efac7c37723db96a2682e960c317b5",
"index": 1007,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(name='keputils', version='0.2.1', description=\n 'Basic module for interaction with KOI and Kepler-stellar tables.',\n long_description=readme(), author='Timothy D. Morton', author_email=\n 'tim.morton@gmail.com', url=\n 'https://github.com/timothydmorton/keputils', packages=['keputils'],\n scripts=['scripts/koiquery'], classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[\n 'pandas>=0.13', 'simpledist'], zip_safe=False)\n",
"step-4": "from setuptools import setup\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(name='keputils', version='0.2.1', description=\n 'Basic module for interaction with KOI and Kepler-stellar tables.',\n long_description=readme(), author='Timothy D. Morton', author_email=\n 'tim.morton@gmail.com', url=\n 'https://github.com/timothydmorton/keputils', packages=['keputils'],\n scripts=['scripts/koiquery'], classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy'], install_requires=[\n 'pandas>=0.13', 'simpledist'], zip_safe=False)\n",
"step-5": "from setuptools import setup\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nsetup(name = \"keputils\",\n version = \"0.2.1\",\n description = \"Basic module for interaction with KOI and Kepler-stellar tables.\",\n long_description = readme(),\n author = \"Timothy D. Morton\",\n author_email = \"tim.morton@gmail.com\",\n url = \"https://github.com/timothydmorton/keputils\",\n packages = ['keputils'],\n scripts = ['scripts/koiquery'],\n #entry_points = {'console_scripts' : ['koiquery = koiquery:main']},\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Astronomy'\n ],\n install_requires=['pandas>=0.13','simpledist'],\n zip_safe=False\n) \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask
app = Flask(__name__)
import orderapi, views, models, processing
if __name__ == "__main__":
orderapi.app.debug = True
orderapi.app.run(host='0.0.0.0', port=34203)
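    # NOTE: app.run() blocks, so the two lines below only execute after the
    # first server shuts down; run each app in its own process to serve both.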
views.app.debug = True
views.app.run(host='0.0.0.0', port=42720)
|
normal
|
{
"blob_id": "3218a9e82cd19bab1680079aee5f09a97992629e",
"index": 6038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n<mask token>\nif __name__ == '__main__':\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-4": "from flask import Flask\napp = Flask(__name__)\nimport orderapi, views, models, processing\nif __name__ == '__main__':\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-5": "from flask import Flask\napp = Flask(__name__)\nimport orderapi, views, models, processing\n\nif __name__==\"__main__\":\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# 30_Days_Of_Code
# Day 2
# Boolean
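# Python's boolean literals are the capitalized keywords True and False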
print(True)
print(False)
|
normal
|
{
"blob_id": "f1ca3d7ff7efcf500f1a16e415b13c47fd08688d",
"index": 5044,
"step-1": "<mask token>\n",
"step-2": "print(True)\nprint(False)\n",
"step-3": "# 30_Days_Of_Code\n# Day 2\n# Boolean\nprint(True)\nprint(False)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 3.2.4 on 2021-06-29 13:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_blogdetail'),
]
operations = [
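        # RenameField renames the underlying column from 'about' to 'intro'
        # while preserving the existing row data.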
migrations.RenameField(
model_name='bloglist',
old_name='about',
new_name='intro',
),
]
|
normal
|
{
"blob_id": "1cf5ce11b965d65426ed421ef369954c59d7eba9",
"index": 3199,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0002_blogdetail')]\n operations = [migrations.RenameField(model_name='bloglist', old_name=\n 'about', new_name='intro')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0002_blogdetail')]\n operations = [migrations.RenameField(model_name='bloglist', old_name=\n 'about', new_name='intro')]\n",
"step-5": "# Generated by Django 3.2.4 on 2021-06-29 13:20\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0002_blogdetail'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='bloglist',\n old_name='about',\n new_name='intro',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = "Rick Sherman"
__credits__ = "Jeremy Schulman, Nitin Kumar"
import unittest
from nose.plugins.attrib import attr
from jnpr.junos import Device
from jnpr.junos.utils.scp import SCP
from mock import patch
@attr('unit')
class TestScp(unittest.TestCase):
def setUp(self):
self.dev = Device(host='1.1.1.1')
@patch('paramiko.SSHClient')
def test_scp_open(self, mock_connect):
from scp import SCPClient
self.dev.bind(scp=SCP)
assert isinstance(self.dev.scp.open(), SCPClient)
@patch('paramiko.SSHClient')
def test_scp_close(self, mock_connect):
self.dev.bind(scp=SCP)
self.dev.scp.open()
self.assertEqual(self.dev.scp.close(), None)
@patch('paramiko.SSHClient')
def test_scp_context(self, mock_connect):
with SCP(self.dev) as scp:
scp.get('addrbook.conf')
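    # Stacked @patch decorators inject mocks bottom-up, so the first argument
    # after self receives the ProxyCommand mock and the last receives the os
    # mock; the parameter list below is ordered accordingly (the original named
    # them in forward order). Mock.assert_called_any() is not a real method and
    # silently passes, so the test asserts mock_proxy.called instead -- this
    # assumes ProxyCommand really is constructed while opening the connection.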
@patch('jnpr.junos.device.os')
@patch('__builtin__.open')
@patch('paramiko.config.SSHConfig.lookup')
@patch('paramiko.SSHClient')
@patch('paramiko.proxy.ProxyCommand')
    def test_scp_proxycommand(self, mock_proxy, mock_connect, mock_lookup, open_mock, os_mock):
os_mock.path.exists.return_value = True
self.dev._sshconf_path = '/home/rsherman/.ssh/config'
with SCP(self.dev) as scp:
scp.get('addrbook.conf')
        self.assertTrue(mock_proxy.called)
|
normal
|
{
"blob_id": "65ea40ad1c1bf6bf23aed5316b91862c9cdc353d",
"index": 5564,
"step-1": "<mask token>\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n <mask token>\n <mask token>\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n",
"step-2": "<mask token>\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n",
"step-3": "__author__ = 'Rick Sherman'\n__credits__ = 'Jeremy Schulman, Nitin Kumar'\n<mask token>\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n",
"step-4": "__author__ = 'Rick Sherman'\n__credits__ = 'Jeremy Schulman, Nitin Kumar'\nimport unittest\nfrom nose.plugins.attrib import attr\nfrom jnpr.junos import Device\nfrom jnpr.junos.utils.scp import SCP\nfrom mock import patch\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko,\n mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n",
"step-5": "__author__ = \"Rick Sherman\"\n__credits__ = \"Jeremy Schulman, Nitin Kumar\"\n\nimport unittest\nfrom nose.plugins.attrib import attr\n\nfrom jnpr.junos import Device\nfrom jnpr.junos.utils.scp import SCP\n\nfrom mock import patch\n\n\n@attr('unit')\nclass TestScp(unittest.TestCase):\n def setUp(self):\n self.dev = Device(host='1.1.1.1')\n\n @patch('paramiko.SSHClient')\n def test_scp_open(self, mock_connect):\n from scp import SCPClient\n self.dev.bind(scp=SCP)\n assert isinstance(self.dev.scp.open(), SCPClient)\n\n @patch('paramiko.SSHClient')\n def test_scp_close(self, mock_connect):\n self.dev.bind(scp=SCP)\n self.dev.scp.open()\n self.assertEqual(self.dev.scp.close(), None)\n\n @patch('paramiko.SSHClient')\n def test_scp_context(self, mock_connect):\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n\n @patch('jnpr.junos.device.os')\n @patch('__builtin__.open')\n @patch('paramiko.config.SSHConfig.lookup')\n @patch('paramiko.SSHClient')\n @patch('paramiko.proxy.ProxyCommand')\n def test_scp_proxycommand(self, os_mock, open_mock, mock_paramiko, mock_connect, mock_proxy):\n os_mock.path.exists.return_value = True\n self.dev._sshconf_path = '/home/rsherman/.ssh/config'\n with SCP(self.dev) as scp:\n scp.get('addrbook.conf')\n mock_proxy.assert_called_any()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
def test_logsources_model(self):
"""
    Check that the security log source's model matches its associated value.
Returns:
"""
log_source = LogSources.objects.get(Model="iptables v1.4.21")
self.assertEqual(log_source.get_model(), "iptables v1.4.21")
|
normal
|
{
"blob_id": "c645461effe288a1959b783473d62ff99ca29547",
"index": 8746,
"step-1": "<mask token>\n",
"step-2": "def test_logsources_model(self):\n \"\"\"\n Comprobacion de que el modelo de la fuente de seguridad coincide con su asociado\n Returns:\n\n \"\"\"\n log_source = LogSources.objects.get(Model='iptables v1.4.21')\n self.assertEqual(log_source.get_model(), 'iptables v1.4.21')\n",
"step-3": "def test_logsources_model(self):\n \"\"\"\n Comprobacion de que el modelo de la fuente de seguridad coincide con su asociado\n Returns:\n\n \"\"\"\n log_source = LogSources.objects.get(Model=\"iptables v1.4.21\")\n self.assertEqual(log_source.get_model(), \"iptables v1.4.21\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class BackendThread(QThread):
<|reserved_special_token_0|>
def run(self):
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString('yyyy-MM-dd hh:mm:ss')
self.update_date.emit(str(currentTime))
time.sleep(1)
class ThreadUpdateUI(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle('多线程更新UI数据')
self.resize(400, 100)
self.input = QLineEdit(self)
self.input.resize(400, 100)
self.initUI()
def initUI(self):
self.backend = BackendThread()
self.backend.update_date.connect(self.handleDisplay)
self.backend.start()
def handleDisplay(self, data):
self.input.setText(data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BackendThread(QThread):
update_date = pyqtSignal(str)
def run(self):
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString('yyyy-MM-dd hh:mm:ss')
self.update_date.emit(str(currentTime))
time.sleep(1)
class ThreadUpdateUI(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle('多线程更新UI数据')
self.resize(400, 100)
self.input = QLineEdit(self)
self.input.resize(400, 100)
self.initUI()
def initUI(self):
self.backend = BackendThread()
self.backend.update_date.connect(self.handleDisplay)
self.backend.start()
def handleDisplay(self, data):
self.input.setText(data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BackendThread(QThread):
update_date = pyqtSignal(str)
def run(self):
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString('yyyy-MM-dd hh:mm:ss')
self.update_date.emit(str(currentTime))
time.sleep(1)
class ThreadUpdateUI(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle('多线程更新UI数据')
self.resize(400, 100)
self.input = QLineEdit(self)
self.input.resize(400, 100)
self.initUI()
def initUI(self):
self.backend = BackendThread()
self.backend.update_date.connect(self.handleDisplay)
self.backend.start()
def handleDisplay(self, data):
self.input.setText(data)
if __name__ == '__main__':
app = QApplication(sys.argv)
example = ThreadUpdateUI()
example.show()
sys.exit(app.exec_())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from PyQt5.QtCore import QThread, pyqtSignal, QDateTime
from PyQt5.QtWidgets import QApplication, QDialog, QLineEdit
import time
import sys
class BackendThread(QThread):
update_date = pyqtSignal(str)
def run(self):
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString('yyyy-MM-dd hh:mm:ss')
self.update_date.emit(str(currentTime))
time.sleep(1)
class ThreadUpdateUI(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle('多线程更新UI数据')
self.resize(400, 100)
self.input = QLineEdit(self)
self.input.resize(400, 100)
self.initUI()
def initUI(self):
self.backend = BackendThread()
self.backend.update_date.connect(self.handleDisplay)
self.backend.start()
def handleDisplay(self, data):
self.input.setText(data)
if __name__ == '__main__':
app = QApplication(sys.argv)
example = ThreadUpdateUI()
example.show()
sys.exit(app.exec_())
<|reserved_special_token_1|>
'''
Updating UI data from a worker thread (passing data between two threads)
'''
from PyQt5.QtCore import QThread , pyqtSignal, QDateTime
from PyQt5.QtWidgets import QApplication, QDialog, QLineEdit
import time
import sys
class BackendThread(QThread):
update_date = pyqtSignal(str)
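    # signal carrying the formatted timestamp; emitted from the worker thread,
    # delivered to whatever slot the GUI thread connected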
def run(self):
while True:
data = QDateTime.currentDateTime()
currentTime = data.toString("yyyy-MM-dd hh:mm:ss")
self.update_date.emit(str(currentTime))
time.sleep(1)
class ThreadUpdateUI(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowTitle('多线程更新UI数据')
self.resize(400,100)
self.input = QLineEdit(self)
self.input.resize(400,100)
self.initUI()
def initUI(self):
self.backend = BackendThread()
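        # cross-thread signal/slot connection: handleDisplay always runs in the
        # GUI thread, so touching the QLineEdit from it is safe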
self.backend.update_date.connect(self.handleDisplay)
self.backend.start()
def handleDisplay(self,data):
self.input.setText(data)
if __name__ == '__main__':
app = QApplication(sys.argv)
example = ThreadUpdateUI()
example.show()
sys.exit(app.exec_())
|
flexible
|
{
"blob_id": "ec625bf57388281b3cbd464459fc3ad1c60b7db9",
"index": 3305,
"step-1": "<mask token>\n\n\nclass BackendThread(QThread):\n <mask token>\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n example = ThreadUpdateUI()\n example.show()\n sys.exit(app.exec_())\n",
"step-4": "<mask token>\nfrom PyQt5.QtCore import QThread, pyqtSignal, QDateTime\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLineEdit\nimport time\nimport sys\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString('yyyy-MM-dd hh:mm:ss')\n self.update_date.emit(str(currentTime))\n time.sleep(1)\n\n\nclass ThreadUpdateUI(QDialog):\n\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400, 100)\n self.input = QLineEdit(self)\n self.input.resize(400, 100)\n self.initUI()\n\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n self.backend.start()\n\n def handleDisplay(self, data):\n self.input.setText(data)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n example = ThreadUpdateUI()\n example.show()\n sys.exit(app.exec_())\n",
"step-5": "'''\n\n多线程更新UI数据(在两个线程中传递数据)\n\n'''\n\nfrom PyQt5.QtCore import QThread , pyqtSignal, QDateTime\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLineEdit\nimport time\nimport sys\n\n\nclass BackendThread(QThread):\n update_date = pyqtSignal(str)\n\n def run(self):\n while True:\n data = QDateTime.currentDateTime()\n currentTime = data.toString(\"yyyy-MM-dd hh:mm:ss\")\n self.update_date.emit(str(currentTime))\n time.sleep(1)\nclass ThreadUpdateUI(QDialog):\n def __init__(self):\n QDialog.__init__(self)\n self.setWindowTitle('多线程更新UI数据')\n self.resize(400,100)\n self.input = QLineEdit(self)\n self.input.resize(400,100)\n\n self.initUI()\n def initUI(self):\n self.backend = BackendThread()\n self.backend.update_date.connect(self.handleDisplay)\n\n self.backend.start()\n\n def handleDisplay(self,data):\n self.input.setText(data)\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n example = ThreadUpdateUI()\n example.show()\n sys.exit(app.exec_())",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class CategorySerializers(serializers.ModelSerializer):
childcategories = RecursiveSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = 'id', 'name', 'parent', 'childcategories'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = 'id', 'name', 'price', 'categories'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RecursiveSerializer(serializers.Serializer):
<|reserved_special_token_0|>
class CategorySerializers(serializers.ModelSerializer):
childcategories = RecursiveSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = 'id', 'name', 'parent', 'childcategories'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = 'id', 'name', 'price', 'categories'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RecursiveSerializer(serializers.Serializer):
def to_representation(self, value):
serializer = self.parent.parent.__class__(value, context=self.context)
return serializer.data
class CategorySerializers(serializers.ModelSerializer):
childcategories = RecursiveSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = 'id', 'name', 'parent', 'childcategories'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = 'id', 'name', 'price', 'categories'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import Category, Product
class RecursiveSerializer(serializers.Serializer):
def to_representation(self, value):
serializer = self.parent.parent.__class__(value, context=self.context)
return serializer.data
class CategorySerializers(serializers.ModelSerializer):
childcategories = RecursiveSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = 'id', 'name', 'parent', 'childcategories'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = 'id', 'name', 'price', 'categories'
<|reserved_special_token_1|>
from rest_framework import serializers
#from rest_framework.response import Response
from .models import Category, Product
class RecursiveSerializer(serializers.Serializer):
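    # self.parent.parent is the serializer that declared this field, so every
    # child category is rendered with the same CategorySerializers, recursively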
def to_representation(self, value):
serializer = self.parent.parent.__class__(value, context=self.context)
return serializer.data
class CategorySerializers(serializers.ModelSerializer):
childcategories = RecursiveSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = ('id', 'name', 'parent', 'childcategories',)
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ('id', 'name', 'price', 'categories')
#class CategorySerializers(serializers.ModelSerializer):
# class Meta:
# model = Category
# fields = ('id', 'name', 'parent')
#def get_fields(self):
# fields = super(CategorySerializers, self).get_fields()
# #fields['childcategories'] = CategorySerializers(many=True, allow_null=True)
# return fields
#class CategorySerializers(serializers.ModelSerializer):
# class Meta:
# model = Category
# fields = ('id', 'name', 'parent')
|
flexible
|
{
"blob_id": "cd9cc656a62728b3649b00c03ca8d05106015007",
"index": 7949,
"step-1": "<mask token>\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n",
"step-2": "<mask token>\n\n\nclass RecursiveSerializer(serializers.Serializer):\n <mask token>\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n",
"step-3": "<mask token>\n\n\nclass RecursiveSerializer(serializers.Serializer):\n\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n",
"step-4": "from rest_framework import serializers\nfrom .models import Category, Product\n\n\nclass RecursiveSerializer(serializers.Serializer):\n\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n",
"step-5": "from rest_framework import serializers\n#from rest_framework.response import Response\nfrom .models import Category, Product\n \nclass RecursiveSerializer(serializers.Serializer):\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n class Meta:\n model = Category\n fields = ('id', 'name', 'parent', 'childcategories',)\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('id', 'name', 'price', 'categories')\n\n#class CategorySerializers(serializers.ModelSerializer):\n# class Meta:\n# model = Category\n# fields = ('id', 'name', 'parent')\n\n #def get_fields(self):\n # fields = super(CategorySerializers, self).get_fields()\n # #fields['childcategories'] = CategorySerializers(many=True, allow_null=True)\n # return fields\n\n\n#class CategorySerializers(serializers.ModelSerializer):\n# class Meta:\n# model = Category\n# fields = ('id', 'name', 'parent')",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os
from tqdm import tqdm
from system.krl import KRL
from system.utils.format import format_data
from system.oie import OIE
# extract one file
def execute_file(input_fp, output_fp):
oie = OIE()
oie.extract_file(input_fp, output_fp)
# extract one sentence
def execute_sentence():
oie = OIE()
# test one data
line = {"text": "印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校"}
line = {"text": "中美两国的人民反对大规模的杀伤性的武器"}
line = {"id": "6",
"sysId": "eb88374b30fda925b399e787a927327c",
"text": "乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。",
"event_list": [
{"event_type": "举办类", "trigger": "举行", "trigger_start_index": "38", "trigger_end_index": "40",
"trigger_entity_type": "NONE", "arguments": [
{"role": "会议", "argument": "抗议集会", "argument_start_index": "40", "argument_end_index": "44",
"argument_entity_type": "Meeting"},
{"role": "地点", "argument": "普天间基地", "argument_start_index": "31", "argument_end_index": "36",
"argument_entity_type": "ZBGC"},
{"role": "时间", "argument": "13日", "argument_start_index": "0", "argument_end_index": "3",
"argument_entity_type": "Time"},
{"role": "主体", "argument": "冲绳和平运动中心", "argument_start_index": "4", "argument_end_index": "12",
"argument_entity_type": "Org"}]}]}
sample = line['text']
result, quick_look = oie.extract(sample, True, True, True)
print(quick_look)
# s += len(result)
# opobj.write(str(result) + "\n")
# opobj2.write(str(quick_look) + "\n")
# print(s)
# opobj.close()
# opobj2.close()
def clean_triples(train_fp, output_fp, is_train: bool):
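    # scores each (head, relation, tail) triple with a TransE-style embedding
    # model so low-confidence extractions can be filtered later; output_fp is
    # currently unused in this sketch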
krl = KRL()
model_type = 'TransE'
if is_train:
model_type = 'TransE'
krl.train(train_fp, model_type=model_type, dev_path=train_fp, save_path='./krl_{}_saves'.format(model_type))
else:
krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=model_type)
if __name__ == "__main__":
# 1 extract the triples
# eg:{"id": "870", "sysId": "3669195fb557cea411d166d353cc194d",
# "text": "目前,黎以临时边界“蓝线”沿线,特别是靠近叙利亚戈兰高地的地段局势紧张,黎以军队和联合国驻黎巴嫩南部临时部队(联黎部队)都处于高度戒备状态,以应对以色列空袭叙利亚可能引发的军事冲突。",
# "event_list": [{"event_type": "军事冲突类", "trigger": "空袭", "trigger_start_index": "76", "trigger_end_index": "78", "trigger_entity_type": "$element$", "arguments": [{"role": "主体", "argument": "以色列", "argument_start_index": "73", "argument_end_index": "76", "argument_entity_type": "Country"}, {"role": "目标", "argument": "叙利亚", "argument_start_index": "78", "argument_end_index": "81", "argument_entity_type": "Country"}]}]}
# -> [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']]
input_file_path = 'data/all_data.json'
triples_file_path = 'result/1_after_extract.txt'
# execute_file(input_file_path, triples_file_path)
# 2 clean the triples
# transform the data format
# [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']] ->
# 南部临时部队(联黎部队), 处于, 高度戒备状态
# 以色列, 空袭, 叙利亚
formatted_fp = 'result/1_after_extract_formatted.txt'
format_data(triples_file_path, formatted_fp)
# using Knowledge Relation Learning (KRL) to score the triples
cleared_file_path = 'result/2_cleared_extract.txt'
clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path, is_train=True)
|
normal
|
{
"blob_id": "bc5e928305d82c92c10106fe1f69f5979d57e3d2",
"index": 5446,
"step-1": "<mask token>\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\ndef execute_sentence():\n oie = OIE()\n line = {'text': '印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校'}\n line = {'text': '中美两国的人民反对大规模的杀伤性的武器'}\n line = {'id': '6', 'sysId': 'eb88374b30fda925b399e787a927327c', 'text':\n '乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。', 'event_list': [{\n 'event_type': '举办类', 'trigger': '举行', 'trigger_start_index': '38',\n 'trigger_end_index': '40', 'trigger_entity_type': 'NONE',\n 'arguments': [{'role': '会议', 'argument': '抗议集会',\n 'argument_start_index': '40', 'argument_end_index': '44',\n 'argument_entity_type': 'Meeting'}, {'role': '地点', 'argument':\n '普天间基地', 'argument_start_index': '31', 'argument_end_index': '36',\n 'argument_entity_type': 'ZBGC'}, {'role': '时间', 'argument': '13日',\n 'argument_start_index': '0', 'argument_end_index': '3',\n 'argument_entity_type': 'Time'}, {'role': '主体', 'argument':\n '冲绳和平运动中心', 'argument_start_index': '4', 'argument_end_index': '12',\n 'argument_entity_type': 'Org'}]}]}\n sample = line['text']\n result, quick_look = oie.extract(sample, True, True, True)\n print(quick_look)\n\n\ndef clean_triples(train_fp, output_fp, is_train: bool):\n krl = KRL()\n model_type = 'TransE'\n if is_train:\n model_type = 'TransE'\n krl.train(train_fp, model_type=model_type, dev_path=train_fp,\n save_path='./krl_{}_saves'.format(model_type))\n else:\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=\n model_type)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\ndef execute_sentence():\n oie = OIE()\n line = {'text': '印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校'}\n line = {'text': '中美两国的人民反对大规模的杀伤性的武器'}\n line = {'id': '6', 'sysId': 'eb88374b30fda925b399e787a927327c', 'text':\n '乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。', 'event_list': [{\n 'event_type': '举办类', 'trigger': '举行', 'trigger_start_index': '38',\n 'trigger_end_index': '40', 'trigger_entity_type': 'NONE',\n 'arguments': [{'role': '会议', 'argument': '抗议集会',\n 'argument_start_index': '40', 'argument_end_index': '44',\n 'argument_entity_type': 'Meeting'}, {'role': '地点', 'argument':\n '普天间基地', 'argument_start_index': '31', 'argument_end_index': '36',\n 'argument_entity_type': 'ZBGC'}, {'role': '时间', 'argument': '13日',\n 'argument_start_index': '0', 'argument_end_index': '3',\n 'argument_entity_type': 'Time'}, {'role': '主体', 'argument':\n '冲绳和平运动中心', 'argument_start_index': '4', 'argument_end_index': '12',\n 'argument_entity_type': 'Org'}]}]}\n sample = line['text']\n result, quick_look = oie.extract(sample, True, True, True)\n print(quick_look)\n\n\ndef clean_triples(train_fp, output_fp, is_train: bool):\n krl = KRL()\n model_type = 'TransE'\n if is_train:\n model_type = 'TransE'\n krl.train(train_fp, model_type=model_type, dev_path=train_fp,\n save_path='./krl_{}_saves'.format(model_type))\n else:\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=\n model_type)\n\n\nif __name__ == '__main__':\n input_file_path = 'data/all_data.json'\n triples_file_path = 'result/1_after_extract.txt'\n formatted_fp = 'result/1_after_extract_formatted.txt'\n format_data(triples_file_path, formatted_fp)\n cleared_file_path = 'result/2_cleared_extract.txt'\n clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path,\n is_train=True)\n",
"step-4": "import os\nfrom tqdm import tqdm\nfrom system.krl import KRL\nfrom system.utils.format import format_data\nfrom system.oie import OIE\n\n\ndef execute_file(input_fp, output_fp):\n oie = OIE()\n oie.extract_file(input_fp, output_fp)\n\n\ndef execute_sentence():\n oie = OIE()\n line = {'text': '印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校'}\n line = {'text': '中美两国的人民反对大规模的杀伤性的武器'}\n line = {'id': '6', 'sysId': 'eb88374b30fda925b399e787a927327c', 'text':\n '乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。', 'event_list': [{\n 'event_type': '举办类', 'trigger': '举行', 'trigger_start_index': '38',\n 'trigger_end_index': '40', 'trigger_entity_type': 'NONE',\n 'arguments': [{'role': '会议', 'argument': '抗议集会',\n 'argument_start_index': '40', 'argument_end_index': '44',\n 'argument_entity_type': 'Meeting'}, {'role': '地点', 'argument':\n '普天间基地', 'argument_start_index': '31', 'argument_end_index': '36',\n 'argument_entity_type': 'ZBGC'}, {'role': '时间', 'argument': '13日',\n 'argument_start_index': '0', 'argument_end_index': '3',\n 'argument_entity_type': 'Time'}, {'role': '主体', 'argument':\n '冲绳和平运动中心', 'argument_start_index': '4', 'argument_end_index': '12',\n 'argument_entity_type': 'Org'}]}]}\n sample = line['text']\n result, quick_look = oie.extract(sample, True, True, True)\n print(quick_look)\n\n\ndef clean_triples(train_fp, output_fp, is_train: bool):\n krl = KRL()\n model_type = 'TransE'\n if is_train:\n model_type = 'TransE'\n krl.train(train_fp, model_type=model_type, dev_path=train_fp,\n save_path='./krl_{}_saves'.format(model_type))\n else:\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=\n model_type)\n\n\nif __name__ == '__main__':\n input_file_path = 'data/all_data.json'\n triples_file_path = 'result/1_after_extract.txt'\n formatted_fp = 'result/1_after_extract_formatted.txt'\n format_data(triples_file_path, formatted_fp)\n cleared_file_path = 'result/2_cleared_extract.txt'\n clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path,\n is_train=True)\n",
"step-5": "import os\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom system.krl import KRL\r\nfrom system.utils.format import format_data\r\nfrom system.oie import OIE\r\n\r\n\r\n# extract one file\r\ndef execute_file(input_fp, output_fp):\r\n oie = OIE()\r\n oie.extract_file(input_fp, output_fp)\r\n\r\n\r\n# extract one sentence\r\ndef execute_sentence():\r\n oie = OIE()\r\n # test one data\r\n line = {\"text\": \"印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校\"}\r\n line = {\"text\": \"中美两国的人民反对大规模的杀伤性的武器\"}\r\n line = {\"id\": \"6\",\r\n \"sysId\": \"eb88374b30fda925b399e787a927327c\",\r\n \"text\": \"乔治·塞菲里斯,生于小亚细亚的斯弥尔纳城,父亲是雅典大学教授,国际法专家。\",\r\n \"event_list\": [\r\n {\"event_type\": \"举办类\", \"trigger\": \"举行\", \"trigger_start_index\": \"38\", \"trigger_end_index\": \"40\",\r\n \"trigger_entity_type\": \"NONE\", \"arguments\": [\r\n {\"role\": \"会议\", \"argument\": \"抗议集会\", \"argument_start_index\": \"40\", \"argument_end_index\": \"44\",\r\n \"argument_entity_type\": \"Meeting\"},\r\n {\"role\": \"地点\", \"argument\": \"普天间基地\", \"argument_start_index\": \"31\", \"argument_end_index\": \"36\",\r\n \"argument_entity_type\": \"ZBGC\"},\r\n {\"role\": \"时间\", \"argument\": \"13日\", \"argument_start_index\": \"0\", \"argument_end_index\": \"3\",\r\n \"argument_entity_type\": \"Time\"},\r\n {\"role\": \"主体\", \"argument\": \"冲绳和平运动中心\", \"argument_start_index\": \"4\", \"argument_end_index\": \"12\",\r\n \"argument_entity_type\": \"Org\"}]}]}\r\n\r\n sample = line['text']\r\n result, quick_look = oie.extract(sample, True, True, True)\r\n print(quick_look)\r\n # s += len(result)\r\n # opobj.write(str(result) + \"\\n\")\r\n # opobj2.write(str(quick_look) + \"\\n\")\r\n # print(s)\r\n # opobj.close()\r\n # opobj2.close()\r\n\r\n\r\ndef clean_triples(train_fp, output_fp, is_train: bool):\r\n krl = KRL()\r\n model_type = 'TransE'\r\n\r\n if is_train:\r\n model_type = 'TransE'\r\n krl.train(train_fp, model_type=model_type, dev_path=train_fp, save_path='./krl_{}_saves'.format(model_type))\r\n else:\r\n krl.load(save_path='./krl_{}_saves'.format(model_type), model_type=model_type)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # 1 extract the triples\r\n # eg:{\"id\": \"870\", \"sysId\": \"3669195fb557cea411d166d353cc194d\",\r\n # \"text\": \"目前,黎以临时边界“蓝线”沿线,特别是靠近叙利亚戈兰高地的地段局势紧张,黎以军队和联合国驻黎巴嫩南部临时部队(联黎部队)都处于高度戒备状态,以应对以色列空袭叙利亚可能引发的军事冲突。\",\r\n # \"event_list\": [{\"event_type\": \"军事冲突类\", \"trigger\": \"空袭\", \"trigger_start_index\": \"76\", \"trigger_end_index\": \"78\", \"trigger_entity_type\": \"$element$\", \"arguments\": [{\"role\": \"主体\", \"argument\": \"以色列\", \"argument_start_index\": \"73\", \"argument_end_index\": \"76\", \"argument_entity_type\": \"Country\"}, {\"role\": \"目标\", \"argument\": \"叙利亚\", \"argument_start_index\": \"78\", \"argument_end_index\": \"81\", \"argument_entity_type\": \"Country\"}]}]}\r\n # -> [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']]\r\n\r\n input_file_path = 'data/all_data.json'\r\n triples_file_path = 'result/1_after_extract.txt'\r\n # execute_file(input_file_path, triples_file_path)\r\n\r\n # 2 clean the triples\r\n # transform the data format\r\n # [['南部临时部队(联黎部队)', '处于', '高度戒备状态'], ['以色列', '空袭', '叙利亚']] ->\r\n # 南部临时部队(联黎部队), 处于, 高度戒备状态\r\n # 以色列, 空袭, 叙利亚\r\n formatted_fp = 'result/1_after_extract_formatted.txt'\r\n format_data(triples_file_path, formatted_fp)\r\n\r\n # using Knowledge Relation Learning (KRL) to score the triples\r\n cleared_file_path = 'result/2_cleared_extract.txt'\r\n clean_triples(train_fp=formatted_fp, output_fp=cleared_file_path, 
is_train=True)\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import sys
sys.stdin = open("1868_input.txt")
dr = [-1, -1, -1, 0, 0, 1, 1, 1]
dc = [-1, 0, 1, -1, 1, -1, 0, 1]
def is_wall(r, c):
if r < 0 or r >= n or c < 0 or c >= n:
return True
return False
def find(r, c, cnt):
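    # BFS flood fill from a zero-adjacent-mine cell: one click reveals the
    # whole connected zero region plus its numbered border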
Q = []
Q.append((r, c))
visited[r][c] = 1
while Q:
tr, tc = Q.pop(0)
mine_cnt = 0
for i in range(8):
nr = tr + dr[i]
nc = tc + dc[i]
if not is_wall(nr, nc):
if Map[nr][nc] == '*':
mine_cnt += 1
if not mine_cnt:
for i in range(8):
nr = tr + dr[i]
nc = tc + dc[i]
if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc] == '.':
Q.append((nr, nc))
visited[nr][nc] = 1
Map[tr][tc] = mine_cnt
return cnt + 1
for tc in range(int(input())):
n = int(input())
Map = []
visited = [[0 for _ in range(n)] for _ in range(n)]
for _ in range(n):
tmp = []
for i in input():
tmp.append(i)
Map.append(tmp)
cnt = 0
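    # cnt = minimum number of clicks: one per connected zero region, plus one
    # for every remaining unrevealed safe cell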
for i in range(n):
for j in range(n):
if Map[i][j] != '.':
continue
mine = 0
for k in range(8):
nr = i + dr[k]
nc = j + dc[k]
if not is_wall(nr, nc) and Map[nr][nc] == '*':
mine += 1
if not mine:
cnt = find(i, j, cnt)
for i in range(n):
for j in range(n):
if Map[i][j] == '.':
cnt += 1
# for i in range(n//2 + 1):
# for j in range(n//2 + 1):
# # print(i, j, n - i - 1, n - j - 1)
# if Map[i][j] == '.':
# cnt = find(i, j, cnt)
# if Map[n - i - 1][j] == '.':
# cnt = find(n - i - 1, j, cnt)
# if Map[i][n - j - 1] == '.':
# cnt = find(i, n - j - 1, cnt)
# if Map[n - i - 1][n - j - 1] == '.':
# cnt = find(n - i - 1, n - j - 1, cnt)
print(f"#{tc + 1} {cnt}")
|
normal
|
{
"blob_id": "8bce394c651931304f59bbca3e2f019212be9fc1",
"index": 4620,
"step-1": "<mask token>\n\n\ndef is_wall(r, c):\n if r < 0 or r >= n or c < 0 or c >= n:\n return True\n return False\n\n\ndef find(r, c, cnt):\n Q = []\n Q.append((r, c))\n visited[r][c] = 1\n while Q:\n tr, tc = Q.pop(0)\n mine_cnt = 0\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc):\n if Map[nr][nc] == '*':\n mine_cnt += 1\n if not mine_cnt:\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc\n ] == '.':\n Q.append((nr, nc))\n visited[nr][nc] = 1\n Map[tr][tc] = mine_cnt\n return cnt + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_wall(r, c):\n if r < 0 or r >= n or c < 0 or c >= n:\n return True\n return False\n\n\ndef find(r, c, cnt):\n Q = []\n Q.append((r, c))\n visited[r][c] = 1\n while Q:\n tr, tc = Q.pop(0)\n mine_cnt = 0\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc):\n if Map[nr][nc] == '*':\n mine_cnt += 1\n if not mine_cnt:\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc\n ] == '.':\n Q.append((nr, nc))\n visited[nr][nc] = 1\n Map[tr][tc] = mine_cnt\n return cnt + 1\n\n\nfor tc in range(int(input())):\n n = int(input())\n Map = []\n visited = [[(0) for _ in range(n)] for _ in range(n)]\n for _ in range(n):\n tmp = []\n for i in input():\n tmp.append(i)\n Map.append(tmp)\n cnt = 0\n for i in range(n):\n for j in range(n):\n if Map[i][j] != '.':\n continue\n mine = 0\n for k in range(8):\n nr = i + dr[k]\n nc = j + dc[k]\n if not is_wall(nr, nc) and Map[nr][nc] == '*':\n mine += 1\n if not mine:\n cnt = find(i, j, cnt)\n for i in range(n):\n for j in range(n):\n if Map[i][j] == '.':\n cnt += 1\n print(f'#{tc + 1} {cnt}')\n",
"step-3": "<mask token>\nsys.stdin = open('1868_input.txt')\ndr = [-1, -1, -1, 0, 0, 1, 1, 1]\ndc = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n\ndef is_wall(r, c):\n if r < 0 or r >= n or c < 0 or c >= n:\n return True\n return False\n\n\ndef find(r, c, cnt):\n Q = []\n Q.append((r, c))\n visited[r][c] = 1\n while Q:\n tr, tc = Q.pop(0)\n mine_cnt = 0\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc):\n if Map[nr][nc] == '*':\n mine_cnt += 1\n if not mine_cnt:\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc\n ] == '.':\n Q.append((nr, nc))\n visited[nr][nc] = 1\n Map[tr][tc] = mine_cnt\n return cnt + 1\n\n\nfor tc in range(int(input())):\n n = int(input())\n Map = []\n visited = [[(0) for _ in range(n)] for _ in range(n)]\n for _ in range(n):\n tmp = []\n for i in input():\n tmp.append(i)\n Map.append(tmp)\n cnt = 0\n for i in range(n):\n for j in range(n):\n if Map[i][j] != '.':\n continue\n mine = 0\n for k in range(8):\n nr = i + dr[k]\n nc = j + dc[k]\n if not is_wall(nr, nc) and Map[nr][nc] == '*':\n mine += 1\n if not mine:\n cnt = find(i, j, cnt)\n for i in range(n):\n for j in range(n):\n if Map[i][j] == '.':\n cnt += 1\n print(f'#{tc + 1} {cnt}')\n",
"step-4": "import sys\nsys.stdin = open('1868_input.txt')\ndr = [-1, -1, -1, 0, 0, 1, 1, 1]\ndc = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n\ndef is_wall(r, c):\n if r < 0 or r >= n or c < 0 or c >= n:\n return True\n return False\n\n\ndef find(r, c, cnt):\n Q = []\n Q.append((r, c))\n visited[r][c] = 1\n while Q:\n tr, tc = Q.pop(0)\n mine_cnt = 0\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc):\n if Map[nr][nc] == '*':\n mine_cnt += 1\n if not mine_cnt:\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc\n ] == '.':\n Q.append((nr, nc))\n visited[nr][nc] = 1\n Map[tr][tc] = mine_cnt\n return cnt + 1\n\n\nfor tc in range(int(input())):\n n = int(input())\n Map = []\n visited = [[(0) for _ in range(n)] for _ in range(n)]\n for _ in range(n):\n tmp = []\n for i in input():\n tmp.append(i)\n Map.append(tmp)\n cnt = 0\n for i in range(n):\n for j in range(n):\n if Map[i][j] != '.':\n continue\n mine = 0\n for k in range(8):\n nr = i + dr[k]\n nc = j + dc[k]\n if not is_wall(nr, nc) and Map[nr][nc] == '*':\n mine += 1\n if not mine:\n cnt = find(i, j, cnt)\n for i in range(n):\n for j in range(n):\n if Map[i][j] == '.':\n cnt += 1\n print(f'#{tc + 1} {cnt}')\n",
"step-5": "import sys\nsys.stdin = open(\"1868_input.txt\")\n\ndr = [-1, -1, -1, 0, 0, 1, 1, 1]\ndc = [-1, 0, 1, -1, 1, -1, 0, 1]\n\ndef is_wall(r, c):\n if r < 0 or r >= n or c < 0 or c >= n:\n return True\n return False\n\ndef find(r, c, cnt):\n Q = []\n Q.append((r, c))\n visited[r][c] = 1\n while Q:\n tr, tc = Q.pop(0)\n mine_cnt = 0\n\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc):\n if Map[nr][nc] == '*':\n mine_cnt += 1\n if not mine_cnt:\n for i in range(8):\n nr = tr + dr[i]\n nc = tc + dc[i]\n if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc] == '.':\n Q.append((nr, nc))\n visited[nr][nc] = 1\n\n Map[tr][tc] = mine_cnt\n\n\n return cnt + 1\n\n\nfor tc in range(int(input())):\n n = int(input())\n Map = []\n visited = [[0 for _ in range(n)] for _ in range(n)]\n for _ in range(n):\n tmp = []\n for i in input():\n tmp.append(i)\n Map.append(tmp)\n cnt = 0\n\n for i in range(n):\n for j in range(n):\n if Map[i][j] != '.':\n continue\n mine = 0\n for k in range(8):\n nr = i + dr[k]\n nc = j + dc[k]\n if not is_wall(nr, nc) and Map[nr][nc] == '*':\n mine += 1\n if not mine:\n cnt = find(i, j, cnt)\n for i in range(n):\n for j in range(n):\n if Map[i][j] == '.':\n cnt += 1\n\n # for i in range(n//2 + 1):\n # for j in range(n//2 + 1):\n # # print(i, j, n - i - 1, n - j - 1)\n # if Map[i][j] == '.':\n # cnt = find(i, j, cnt)\n # if Map[n - i - 1][j] == '.':\n # cnt = find(n - i - 1, j, cnt)\n # if Map[i][n - j - 1] == '.':\n # cnt = find(i, n - j - 1, cnt)\n # if Map[n - i - 1][n - j - 1] == '.':\n # cnt = find(n - i - 1, n - j - 1, cnt)\n\n print(f\"#{tc + 1} {cnt}\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class hackernewsUpvoter:
<|reserved_special_token_0|>
def sign_in(self, login_page='https://news.ycombinator.com/login'):
self.driver.get(login_page)
time.sleep(2)
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11, 35) / 10)
password.send_keys(Keys.RETURN)
<|reserved_special_token_0|>
def goto_page(self, page):
self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name('morelink')
more[0].click()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class hackernewsUpvoter:
def __init__(self, username, password, website):
self.driver = webdriver.Chrome(PATH)
self.username = username
self.password = password
self.website = website
def sign_in(self, login_page='https://news.ycombinator.com/login'):
self.driver.get(login_page)
time.sleep(2)
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11, 35) / 10)
password.send_keys(Keys.RETURN)
<|reserved_special_token_0|>
def goto_page(self, page):
self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name('morelink')
more[0].click()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class hackernewsUpvoter:
def __init__(self, username, password, website):
self.driver = webdriver.Chrome(PATH)
self.username = username
self.password = password
self.website = website
def sign_in(self, login_page='https://news.ycombinator.com/login'):
self.driver.get(login_page)
time.sleep(2)
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11, 35) / 10)
password.send_keys(Keys.RETURN)
def upvoter(self):
upvoteButtons = self.driver.find_elements_by_class_name('votearrow')
for button in upvoteButtons:
try:
button.click()
time.sleep(1)
except:
print("The upvote button wasn't clickable")
pass
def goto_page(self, page):
self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name('morelink')
more[0].click()
<|reserved_special_token_0|>
bot.sign_in()
for i in range(3, 5):
bot.upvoter()
bot.goto_page(i)
time.sleep(random.randrange(300, 500) / 100)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PATH = 'C:\\Program Files (x86)\\chromedriver.exe'
destination = 'https://news.ycombinator.com/'
class hackernewsUpvoter:
def __init__(self, username, password, website):
self.driver = webdriver.Chrome(PATH)
self.username = username
self.password = password
self.website = website
def sign_in(self, login_page='https://news.ycombinator.com/login'):
self.driver.get(login_page)
time.sleep(2)
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11, 35) / 10)
password.send_keys(Keys.RETURN)
def upvoter(self):
upvoteButtons = self.driver.find_elements_by_class_name('votearrow')
for button in upvoteButtons:
try:
button.click()
time.sleep(1)
except:
print("The upvote button wasn't clickable")
pass
def goto_page(self, page):
self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name('morelink')
more[0].click()
bot = hackernewsUpvoter(input(), input(), destination)
bot.sign_in()
for i in range(3, 5):
bot.upvoter()
bot.goto_page(i)
time.sleep(random.randrange(300, 500) / 100)
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
PATH = "C:\\Program Files (x86)\\chromedriver.exe"
destination = "https://news.ycombinator.com/"
class hackernewsUpvoter():
def __init__(self, username, password, website):
self.driver = webdriver.Chrome(PATH)
self.username = username
self.password = password
self.website = website
def sign_in(self, login_page="https://news.ycombinator.com/login"):
# Go to hackernews's website
self.driver.get(login_page)
time.sleep(2)
# Enter username
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
# Enter password
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11,35)/10)
# Click enter key
password.send_keys(Keys.RETURN)
def upvoter(self):
upvoteButtons = self.driver.find_elements_by_class_name("votearrow")
# Click every upvote buttons in the page
for button in upvoteButtons:
try:
button.click()
time.sleep(1)
except:
print("The upvote button wasn't clickable")
pass
def goto_page(self, page):
self.driver.get("https://news.ycombinator.com/news?p={}".format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name("morelink")
more[0].click()
bot = hackernewsUpvoter(input(), input(), destination)
bot.sign_in()
for i in range(3,5):
bot.upvoter()
bot.goto_page(i)
time.sleep(random.randrange(300,500)/100)
|
flexible
|
{
"blob_id": "742b655ee6aad2575f67e7329ed7a14c4fb6aa06",
"index": 7242,
"step-1": "<mask token>\n\n\nclass hackernewsUpvoter:\n <mask token>\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n <mask token>\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n <mask token>\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n\n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name('votearrow')\n for button in upvoteButtons:\n try:\n button.click()\n time.sleep(1)\n except:\n print(\"The upvote button wasn't clickable\")\n pass\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\nbot.sign_in()\nfor i in range(3, 5):\n bot.upvoter()\n bot.goto_page(i)\n time.sleep(random.randrange(300, 500) / 100)\n",
"step-4": "<mask token>\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndestination = 'https://news.ycombinator.com/'\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n\n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name('votearrow')\n for button in upvoteButtons:\n try:\n button.click()\n time.sleep(1)\n except:\n print(\"The upvote button wasn't clickable\")\n pass\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\nbot = hackernewsUpvoter(input(), input(), destination)\nbot.sign_in()\nfor i in range(3, 5):\n bot.upvoter()\n bot.goto_page(i)\n time.sleep(random.randrange(300, 500) / 100)\n",
"step-5": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport random\n\nPATH = \"C:\\\\Program Files (x86)\\\\chromedriver.exe\"\ndestination = \"https://news.ycombinator.com/\"\n\nclass hackernewsUpvoter():\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH) \n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page=\"https://news.ycombinator.com/login\"):\n # Go to hackernews's website\n self.driver.get(login_page)\n time.sleep(2)\n\n # Enter username \n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n\n # Enter password\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11,35)/10)\n\n # Click enter key\n password.send_keys(Keys.RETURN)\n \n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name(\"votearrow\")\n\n # Click every upvote buttons in the page \n for button in upvoteButtons:\n try: \n button.click()\n time.sleep(1)\n except: \n print(\"The upvote button wasn't clickable\")\n pass\n \n def goto_page(self, page):\n self.driver.get(\"https://news.ycombinator.com/news?p={}\".format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name(\"morelink\")\n more[0].click()\n\nbot = hackernewsUpvoter(input(), input(), destination)\nbot.sign_in()\n\nfor i in range(3,5):\n bot.upvoter() \n bot.goto_page(i)\n time.sleep(random.randrange(300,500)/100)\n\n\n\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
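A note on the record above: the find_element_by_name and find_elements_by_class_name helpers it uses belong to the Selenium 3 API and were removed in Selenium 4. A minimal sketch of the same lookups under the Selenium 4 locator API, assuming selenium>=4 is installed:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # recent Selenium 4 releases resolve the driver binary automatically
driver.get('https://news.ycombinator.com/login')
account = driver.find_element(By.NAME, 'acct')             # was find_element_by_name('acct')
password = driver.find_element(By.NAME, 'pw')              # was find_element_by_name('pw')
arrows = driver.find_elements(By.CLASS_NAME, 'votearrow')  # was find_elements_by_class_name('votearrow')
driver.quit()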
from texttable import Texttable
from nexuscli import exception
from nexuscli.api import cleanup_policy
from nexuscli.cli import constants
def cmd_list(nexus_client):
"""Performs ``nexus3 cleanup_policy list``"""
policies = nexus_client.cleanup_policies.list()
if len(policies) == 0:
return exception.CliReturnCode.POLICY_NOT_FOUND.value
table = Texttable(max_width=constants.TTY_MAX_WIDTH)
table.add_row(
['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])
table.set_deco(Texttable.HEADER)
for policy in policies:
p = policy.configuration
table.add_row([
p['name'], p['format'],
p['criteria'].get('lastDownloaded', 'null'),
p['criteria'].get('lastBlobUpdated', 'null'),
p['criteria'].get('regex', 'null')],
)
print(table.draw())
return exception.CliReturnCode.SUCCESS.value
def cmd_create(nexus_client, **kwargs):
"""Performs ``nexus3 cleanup_policy create``"""
policy = cleanup_policy.CleanupPolicy(None, **kwargs)
nexus_client.cleanup_policies.create_or_update(policy)
return exception.CliReturnCode.SUCCESS.value
|
normal
|
{
"blob_id": "521b90ffb4bace4cbd50d08ed4be278d4f259822",
"index": 7049,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n return exception.CliReturnCode.SUCCESS.value\n",
"step-3": "<mask token>\n\n\ndef cmd_list(nexus_client):\n \"\"\"Performs ``nexus3 cleanup_policy list``\"\"\"\n policies = nexus_client.cleanup_policies.list()\n if len(policies) == 0:\n return exception.CliReturnCode.POLICY_NOT_FOUND.value\n table = Texttable(max_width=constants.TTY_MAX_WIDTH)\n table.add_row(['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])\n table.set_deco(Texttable.HEADER)\n for policy in policies:\n p = policy.configuration\n table.add_row([p['name'], p['format'], p['criteria'].get(\n 'lastDownloaded', 'null'), p['criteria'].get('lastBlobUpdated',\n 'null'), p['criteria'].get('regex', 'null')])\n print(table.draw())\n return exception.CliReturnCode.SUCCESS.value\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n return exception.CliReturnCode.SUCCESS.value\n",
"step-4": "from texttable import Texttable\nfrom nexuscli import exception\nfrom nexuscli.api import cleanup_policy\nfrom nexuscli.cli import constants\n\n\ndef cmd_list(nexus_client):\n \"\"\"Performs ``nexus3 cleanup_policy list``\"\"\"\n policies = nexus_client.cleanup_policies.list()\n if len(policies) == 0:\n return exception.CliReturnCode.POLICY_NOT_FOUND.value\n table = Texttable(max_width=constants.TTY_MAX_WIDTH)\n table.add_row(['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])\n table.set_deco(Texttable.HEADER)\n for policy in policies:\n p = policy.configuration\n table.add_row([p['name'], p['format'], p['criteria'].get(\n 'lastDownloaded', 'null'), p['criteria'].get('lastBlobUpdated',\n 'null'), p['criteria'].get('regex', 'null')])\n print(table.draw())\n return exception.CliReturnCode.SUCCESS.value\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n return exception.CliReturnCode.SUCCESS.value\n",
"step-5": "from texttable import Texttable\n\nfrom nexuscli import exception\nfrom nexuscli.api import cleanup_policy\nfrom nexuscli.cli import constants\n\n\ndef cmd_list(nexus_client):\n \"\"\"Performs ``nexus3 cleanup_policy list``\"\"\"\n policies = nexus_client.cleanup_policies.list()\n if len(policies) == 0:\n return exception.CliReturnCode.POLICY_NOT_FOUND.value\n\n table = Texttable(max_width=constants.TTY_MAX_WIDTH)\n table.add_row(\n ['Name', 'Format', 'Downloaded', 'Updated', 'Regex'])\n table.set_deco(Texttable.HEADER)\n for policy in policies:\n p = policy.configuration\n table.add_row([\n p['name'], p['format'],\n p['criteria'].get('lastDownloaded', 'null'),\n p['criteria'].get('lastBlobUpdated', 'null'),\n p['criteria'].get('regex', 'null')],\n )\n\n print(table.draw())\n return exception.CliReturnCode.SUCCESS.value\n\n\ndef cmd_create(nexus_client, **kwargs):\n \"\"\"Performs ``nexus3 cleanup_policy create``\"\"\"\n policy = cleanup_policy.CleanupPolicy(None, **kwargs)\n nexus_client.cleanup_policies.create_or_update(policy)\n\n return exception.CliReturnCode.SUCCESS.value\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
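The two commands above only need an object that exposes cleanup_policies, so cmd_list can be exercised without a live Nexus server. A minimal sketch with stand-in objects, assuming the module above and its texttable/nexuscli dependencies are importable:

class FakePolicy:
    configuration = {'name': 'purge-old', 'format': 'maven2',
                     'criteria': {'lastDownloaded': '30'}}

class FakePolicies:
    def list(self):
        return [FakePolicy()]

class FakeClient:
    cleanup_policies = FakePolicies()

# Prints the header plus one row (Updated and Regex fall back to 'null')
# and returns exception.CliReturnCode.SUCCESS.value.
print(cmd_list(FakeClient()))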
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SendMes(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SendMes(object):
REGION = 'cn-hangzhou'
PRODUCT_NAME = 'Dysmsapi'
DOMAIN = 'dysmsapi.aliyuncs.com'
ACCESS_KEY_ID = 'LTAIYEeWFSUAFcYy'
ACCESS_KEY_SECRET = 'FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY'
acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)
region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)
def send_2_mes(self, phone_numbers, code):
sign_name = 'SpiritBlog'
template_code = 'SMS_137657397'
business_id = uuid.uuid1()
template_param = '{"code":"%s"}' % code
smsRequest = SendSmsRequest.SendSmsRequest()
smsRequest.set_TemplateCode(template_code)
if template_param is not None:
smsRequest.set_TemplateParam(template_param)
smsRequest.set_OutId(business_id)
smsRequest.set_SignName(sign_name)
smsRequest.set_PhoneNumbers(phone_numbers)
smsResponse = self.acs_client.do_action_with_exception(smsRequest)
return smsResponse
<|reserved_special_token_1|>
import uuid
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.profile import region_provider
from celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest
class SendMes(object):
REGION = 'cn-hangzhou'
PRODUCT_NAME = 'Dysmsapi'
DOMAIN = 'dysmsapi.aliyuncs.com'
ACCESS_KEY_ID = 'LTAIYEeWFSUAFcYy'
ACCESS_KEY_SECRET = 'FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY'
acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)
region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)
def send_2_mes(self, phone_numbers, code):
sign_name = 'SpiritBlog'
template_code = 'SMS_137657397'
business_id = uuid.uuid1()
template_param = '{"code":"%s"}' % code
smsRequest = SendSmsRequest.SendSmsRequest()
smsRequest.set_TemplateCode(template_code)
if template_param is not None:
smsRequest.set_TemplateParam(template_param)
smsRequest.set_OutId(business_id)
smsRequest.set_SignName(sign_name)
smsRequest.set_PhoneNumbers(phone_numbers)
smsResponse = self.acs_client.do_action_with_exception(smsRequest)
return smsResponse
<|reserved_special_token_1|>
import uuid
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.profile import region_provider
# Note: do not change this import
from celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest
class SendMes(object):
REGION = "cn-hangzhou"
PRODUCT_NAME = "Dysmsapi"
DOMAIN = "dysmsapi.aliyuncs.com"
	# ACCESS_KEY_ID and ACCESS_KEY_SECRET issued for this account
ACCESS_KEY_ID = "LTAIYEeWFSUAFcYy"
ACCESS_KEY_SECRET = "FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY"
acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)
region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)
def send_2_mes(self, phone_numbers, code):
		# Registered SMS signature and SMS template
sign_name = 'SpiritBlog'
template_code = 'SMS_137657397'
business_id = uuid.uuid1()
template_param = '{"code":"%s"}' % code
smsRequest = SendSmsRequest.SendSmsRequest()
		# Registered SMS template code, required
smsRequest.set_TemplateCode(template_code)
		# SMS template variable parameters
if template_param is not None:
smsRequest.set_TemplateParam(template_param)
		# Set the business request serial number, required.
smsRequest.set_OutId(business_id)
		# SMS signature
smsRequest.set_SignName(sign_name)
		# List of recipient phone numbers, required.
smsRequest.set_PhoneNumbers(phone_numbers)
		# Call the SMS sending API; returns JSON
smsResponse = self.acs_client.do_action_with_exception(smsRequest)
return smsResponse
# sm = SendMes()
# sm.send_2_mes(15071176826, 333333)
|
flexible
|
{
"blob_id": "daecbf5280c199b31f3b9d9818df245d9cd165a7",
"index": 4295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SendMes(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SendMes(object):\n REGION = 'cn-hangzhou'\n PRODUCT_NAME = 'Dysmsapi'\n DOMAIN = 'dysmsapi.aliyuncs.com'\n ACCESS_KEY_ID = 'LTAIYEeWFSUAFcYy'\n ACCESS_KEY_SECRET = 'FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY'\n acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\n region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n\n def send_2_mes(self, phone_numbers, code):\n sign_name = 'SpiritBlog'\n template_code = 'SMS_137657397'\n business_id = uuid.uuid1()\n template_param = '{\"code\":\"%s\"}' % code\n smsRequest = SendSmsRequest.SendSmsRequest()\n smsRequest.set_TemplateCode(template_code)\n if template_param is not None:\n smsRequest.set_TemplateParam(template_param)\n smsRequest.set_OutId(business_id)\n smsRequest.set_SignName(sign_name)\n smsRequest.set_PhoneNumbers(phone_numbers)\n smsResponse = self.acs_client.do_action_with_exception(smsRequest)\n return smsResponse\n",
"step-4": "import uuid\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.profile import region_provider\nfrom celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\n\n\nclass SendMes(object):\n REGION = 'cn-hangzhou'\n PRODUCT_NAME = 'Dysmsapi'\n DOMAIN = 'dysmsapi.aliyuncs.com'\n ACCESS_KEY_ID = 'LTAIYEeWFSUAFcYy'\n ACCESS_KEY_SECRET = 'FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY'\n acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\n region_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n\n def send_2_mes(self, phone_numbers, code):\n sign_name = 'SpiritBlog'\n template_code = 'SMS_137657397'\n business_id = uuid.uuid1()\n template_param = '{\"code\":\"%s\"}' % code\n smsRequest = SendSmsRequest.SendSmsRequest()\n smsRequest.set_TemplateCode(template_code)\n if template_param is not None:\n smsRequest.set_TemplateParam(template_param)\n smsRequest.set_OutId(business_id)\n smsRequest.set_SignName(sign_name)\n smsRequest.set_PhoneNumbers(phone_numbers)\n smsResponse = self.acs_client.do_action_with_exception(smsRequest)\n return smsResponse\n",
"step-5": "import uuid\nfrom aliyunsdkcore.client import AcsClient\nfrom aliyunsdkcore.profile import region_provider\n\n\n# 注意:不要更改\nfrom celery_tasks.sms.dysms_python.build.lib.aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\n\n\nclass SendMes(object):\n\tREGION = \"cn-hangzhou\"\n\tPRODUCT_NAME = \"Dysmsapi\"\n\tDOMAIN = \"dysmsapi.aliyuncs.com\"\n\n\t# 申请的ACCESS_KEY_ID和ACCESS_KEY_SECRET\n\tACCESS_KEY_ID = \"LTAIYEeWFSUAFcYy\"\n\tACCESS_KEY_SECRET = \"FeuGEGSeHXHJ7A4uFIO0mMLoGjKiiY\"\n\n\tacs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET, REGION)\n\tregion_provider.add_endpoint(PRODUCT_NAME, REGION, DOMAIN)\n\n\tdef send_2_mes(self, phone_numbers, code):\n\t\t# 申请的短信签名 和 短信模板\n\t\tsign_name = 'SpiritBlog'\n\t\ttemplate_code = 'SMS_137657397'\n\t\tbusiness_id = uuid.uuid1()\n\t\ttemplate_param = '{\"code\":\"%s\"}' % code\n\t\tsmsRequest = SendSmsRequest.SendSmsRequest()\n\t\t# 申请的短信模板编码,必填\n\t\tsmsRequest.set_TemplateCode(template_code)\n\n\t\t# 短信模板变量参数\n\t\tif template_param is not None:\n\t\t\tsmsRequest.set_TemplateParam(template_param)\n\n\t\t# 设置业务请求流水号,必填。\n\t\tsmsRequest.set_OutId(business_id)\n\n\t\t# 短信签名\n\t\tsmsRequest.set_SignName(sign_name)\n\n\t\t# 短信发送的号码列表,必填。\n\t\tsmsRequest.set_PhoneNumbers(phone_numbers)\n\n\t\t# 调用短信发送接口,返回json\n\t\tsmsResponse = self.acs_client.do_action_with_exception(smsRequest)\n\t\treturn smsResponse\n\n# sm = SendMes()\n# sm.send_2_mes(15071176826, 333333)\n",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
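The commented-out lines at the end of the original hint at the intended usage; spelled out as a sketch, it only succeeds with a valid key pair and a registered template:

sm = SendMes()
# Phone number and code mirror the commented-out example above (passed as
# strings here); the return value is the raw payload from do_action_with_exception.
response = sm.send_2_mes('15071176826', '333333')
print(response)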
from gerador_senha import gerar_senha
gerar_senha()
|
normal
|
{
"blob_id": "e81da535408cc36655328b37ca99b4f775f3a78e",
"index": 8435,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngerar_senha()\n",
"step-3": "from gerador_senha import gerar_senha\ngerar_senha()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
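The gerador_senha module that this two-line script imports is not part of the record. The stand-in below is purely hypothetical, included only so the snippet can run on its own:

# Hypothetical gerador_senha.py: an assumption, not the original module.
import random
import string

def gerar_senha(tamanho: int = 12) -> None:
    # secrets.choice would be preferable for real passwords.
    alfabeto = string.ascii_letters + string.digits
    print(''.join(random.choice(alfabeto) for _ in range(tamanho)))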
def area (a, b):
resultado = a * b
return (resultado)
def main():
    # write your code below this line
num1 = float(input("INTRODUCE LA BASE: "))
num2 = float(input("INTRODUCE LA ALTURA: "))
print ("EL AREA DEL RECTANGULO ES: ", area (num1, num2))
pass
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "282dbdb3a8d9ed914e8ca5c7fa74d2873920e18c",
"index": 7308,
"step-1": "<mask token>\n",
"step-2": "def area(a, b):\n resultado = a * b\n return resultado\n\n\n<mask token>\n",
"step-3": "def area(a, b):\n resultado = a * b\n return resultado\n\n\ndef main():\n num1 = float(input('INTRODUCE LA BASE: '))\n num2 = float(input('INTRODUCE LA ALTURA: '))\n print('EL AREA DEL RECTANGULO ES: ', area(num1, num2))\n\n\n<mask token>\n",
"step-4": "def area(a, b):\n resultado = a * b\n return resultado\n\n\ndef main():\n num1 = float(input('INTRODUCE LA BASE: '))\n num2 = float(input('INTRODUCE LA ALTURA: '))\n print('EL AREA DEL RECTANGULO ES: ', area(num1, num2))\n\n\npass\nif __name__ == '__main__':\n main()\n",
"step-5": "def area (a, b):\n resultado = a * b \n return (resultado)\n\ndef main():\n #escribe tu código abajo de esta línea\n num1 = float(input(\"INTRODUCE LA BASE: \"))\n num2 = float(input(\"INTRODUCE LA ALTURA: \"))\n\n print (\"EL AREA DEL RECTANGULO ES: \", area (num1, num2))\n\npass\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
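For a quick check without the input() prompts, area() can be called directly:

assert area(3.0, 4.5) == 13.5  # base * height
print(area(2, 5))              # 10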
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("http://192.168.1.248:9079/#/")
lanuage = driver.find_element_by_class_name("el-dropdown-trigger-text")
print(type(lanuage))
print(lanuage.text)
try:
driver.find_element_by_class_name("el-dropdown-trigger-text").text =="中文"
print("符合要求")
except EOFError:
print("不是中文")
# driver.find_element_by_link_text("简体中文")
|
normal
|
{
"blob_id": "6a1f58af26bbc4d584ffd699c512ef433ffb80d8",
"index": 7206,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('http://192.168.1.248:9079/#/')\n<mask token>\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.1.248:9079/#/')\nlanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-4": "from selenium import webdriver\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.1.248:9079/#/')\nlanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-5": "from selenium import webdriver\n\n\ndriver = webdriver.Chrome()\ndriver.get(\"http://192.168.1.248:9079/#/\")\n\n\nlanuage = driver.find_element_by_class_name(\"el-dropdown-trigger-text\")\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name(\"el-dropdown-trigger-text\").text ==\"中文\"\n print(\"符合要求\")\nexcept EOFError:\n print(\"不是中文\") \n# driver.find_element_by_link_text(\"简体中文\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
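Two details worth noting in the record above: the bare comparison inside the try block computes a boolean and discards it, and a failed comparison never raises EOFError, so the except branch is effectively unreachable. A sketch of the check the code appears to intend, using the same Selenium 3-style API:

from selenium import webdriver

driver = webdriver.Chrome()
driver.get('http://192.168.1.248:9079/#/')
label = driver.find_element_by_class_name('el-dropdown-trigger-text')
if label.text == '中文':
    print('符合要求')  # the language label reads Chinese, as required
else:
    print('不是中文')  # the label is not Chinese
driver.quit()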
<|reserved_special_token_0|>
class CenterOriginToZero(bpy.types.Operator):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script"""
bl_idname = 'object.center_all_in_level'
bl_label = 'Center Origin (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script"""
bl_idname = 'object.center_all_in_level'
bl_label = 'Center Origin (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
if __name__ == '__main__':
register()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bl_info = {'name': 'Ratchets Center All Objects', 'author': 'Ratchet3789',
'version': (0, 1, 0), 'description':
'Centers all selected objects. Built for Game Development.', 'category':
'Object'}
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script"""
bl_idname = 'object.center_all_in_level'
bl_label = 'Center Origin (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.location = 0, 0, 0
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.snap_to_origin'
bl_label = 'Center Mesh (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = 'object.absolute_center_all_in_level'
bl_label = 'Center All (Zero)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')
x.location = 0, 0, 0
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
if __name__ == '__main__':
register()
<|reserved_special_token_1|>
import bpy
bl_info = {
"name": "Ratchets Center All Objects",
"author": "Ratchet3789",
"version": (0, 1, 0),
"description": "Centers all selected objects. Built for Game Development.",
"category": "Object",
}
class CenterOriginToZero(bpy.types.Operator):
"""Center all objects script""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "object.center_all_in_level" # unique identifier for buttons and menu items to reference.
bl_label = "Center Origin (Zero)" # display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
# execute() is called by blender when running the operator.
def execute(self, context):
# The original script
for x in bpy.context.selected_objects:
x.location = (0, 0, 0)
# this lets blender know the operator finished successfully.
return {'FINISHED'}
class SnapMeshToOrigin(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = "object.snap_to_origin"
bl_label = "Center Mesh (Zero)"
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN")
return {'FINISHED'}
class AbsoluteCenterObjects(bpy.types.Operator):
"""ABSOLUTE Zero of all objects within the scene"""
bl_idname = "object.absolute_center_all_in_level"
bl_label = "Center All (Zero)"
bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.
def execute(self, context):
for x in bpy.context.selected_objects:
x.select = True
bpy.ops.object.origin_set(type="GEOMETRY_ORIGIN")
x.location = (0, 0, 0)
return {'FINISHED'}
def register():
bpy.utils.register_class(CenterOriginToZero)
bpy.utils.register_class(SnapMeshToOrigin)
bpy.utils.register_class(AbsoluteCenterObjects)
def unregister():
bpy.utils.unregister_class(CenterOriginToZero)
bpy.utils.unregister_class(SnapMeshToOrigin)
bpy.utils.unregister_class(AbsoluteCenterObjects)
# This allows you to run the script directly from blenders text editor
# to test the addon without having to install it.
if __name__ == "__main__":
register()
|
flexible
|
{
"blob_id": "f7a511beaea869cf32eb905a4f3685077297a5ec",
"index": 1654,
"step-1": "<mask token>\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\"\n bl_idname = 'object.center_all_in_level'\n bl_label = 'Center Origin (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\"\n bl_idname = 'object.center_all_in_level'\n bl_label = 'Center Origin (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n\nif __name__ == '__main__':\n register()\n",
"step-4": "<mask token>\nbl_info = {'name': 'Ratchets Center All Objects', 'author': 'Ratchet3789',\n 'version': (0, 1, 0), 'description':\n 'Centers all selected objects. Built for Game Development.', 'category':\n 'Object'}\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\"\n bl_idname = 'object.center_all_in_level'\n bl_label = 'Center Origin (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.snap_to_origin'\n bl_label = 'Center Mesh (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n return {'FINISHED'}\n\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = 'object.absolute_center_all_in_level'\n bl_label = 'Center All (Zero)'\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN')\n x.location = 0, 0, 0\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n\nif __name__ == '__main__':\n register()\n",
"step-5": "import bpy\nbl_info = {\n \"name\": \"Ratchets Center All Objects\",\n \"author\": \"Ratchet3789\",\n \"version\": (0, 1, 0),\n \"description\": \"Centers all selected objects. Built for Game Development.\",\n \"category\": \"Object\",\n}\n\n\nclass CenterOriginToZero(bpy.types.Operator):\n \"\"\"Center all objects script\"\"\" # blender will use this as a tooltip for menu items and buttons.\n bl_idname = \"object.center_all_in_level\" # unique identifier for buttons and menu items to reference.\n bl_label = \"Center Origin (Zero)\"\t\t\t# display name in the interface.\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n # execute() is called by blender when running the operator.\n def execute(self, context):\n\n # The original script\n for x in bpy.context.selected_objects:\n x.location = (0, 0, 0)\n # this lets blender know the operator finished successfully.\n return {'FINISHED'}\n\nclass SnapMeshToOrigin(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = \"object.snap_to_origin\"\n bl_label = \"Center Mesh (Zero)\"\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n def execute(self, context):\n \n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\")\n return {'FINISHED'}\n\nclass AbsoluteCenterObjects(bpy.types.Operator):\n \"\"\"ABSOLUTE Zero of all objects within the scene\"\"\"\n bl_idname = \"object.absolute_center_all_in_level\"\n bl_label = \"Center All (Zero)\"\n bl_options = {'REGISTER', 'UNDO'} # enable undo for the operator.\n\n def execute(self, context):\n for x in bpy.context.selected_objects:\n x.select = True\n bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\")\n x.location = (0, 0, 0)\n return {'FINISHED'}\n\n\ndef register():\n bpy.utils.register_class(CenterOriginToZero)\n bpy.utils.register_class(SnapMeshToOrigin)\n bpy.utils.register_class(AbsoluteCenterObjects)\n\ndef unregister():\n bpy.utils.unregister_class(CenterOriginToZero)\n bpy.utils.unregister_class(SnapMeshToOrigin)\n bpy.utils.unregister_class(AbsoluteCenterObjects)\n\n# This allows you to run the script directly from blenders text editor\n# to test the addon without having to install it.\nif __name__ == \"__main__\":\n register()\n",
"step-ids": [
10,
14,
15,
16,
18
]
}
|
[
10,
14,
15,
16,
18
] |
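Once register() has run inside Blender, each operator is callable by its bl_idname from the Python console. A sketch (this only works in Blender's embedded interpreter, on the current selection; note that x.select = True dates the add-on to the 2.7x API, since 2.8+ uses select_set(True)):

import bpy

bpy.ops.object.center_all_in_level()           # move selected objects' origins to (0, 0, 0)
bpy.ops.object.snap_to_origin()                # recenter mesh data around each origin
bpy.ops.object.absolute_center_all_in_level()  # recenter mesh data, then zero each location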
thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
module = thisdict["model"]  # square-bracket lookup; a missing key would raise KeyError
print("model:", module)

thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
module = thisdict.get("model")  # .get() is a method call; returns None for a missing key
print("model:", module)
|
normal
|
{
"blob_id": "3d854c83488eeafa035ccf5d333eeeae63505255",
"index": 6908,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('model:', module)\n<mask token>\nprint('model:', module)\n",
"step-3": "thisdict = {'brand': 'ford', 'model': 'Mustang', 'year': 1964}\nmodule = thisdict['modal']\nprint('model:', module)\nthisdict = {'brand': 'ford', 'model': 'Mustang', 'year': 1964}\nmodule = thisdict.get['modal']\nprint('model:', module)\n",
"step-4": "thisdict = {\"brand\": \"ford\", \"model\": \"Mustang\", \"year\": 1964}\nmodule = thisdict[\"modal\"]\nprint(\"model:\", module)\n\nthisdict = {\"brand\": \"ford\", \"model\": \"Mustang\", \"year\": 1964}\nmodule = thisdict.get[\"modal\"]\nprint(\"model:\", module)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
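The practical difference between the two lookups is their behavior on a missing key: square brackets raise, while .get() returns None or a supplied default:

thisdict = {"brand": "ford", "model": "Mustang", "year": 1964}
print(thisdict.get("modal"))             # None: .get() tolerates missing keys
print(thisdict.get("modal", "unknown"))  # a default can be supplied
try:
    thisdict["modal"]                    # square brackets raise instead
except KeyError as err:
    print("KeyError:", err)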
<|reserved_special_token_0|>
class NURBS:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def addKnot(self, knot: float) ->None:
self._knots.append(knot)
def pointCount(self) ->int:
return len(self._points)
<|reserved_special_token_0|>
def _N(self, i: int, n: int, u: float) ->float:
if n == 0:
if self._knots[i] <= u <= self._knots[i + 1]:
return 1
return 0
else:
Nin1u = self._N(i, n - 1, u)
Ni1n1u = self._N(i + 1, n - 1, u)
if Nin1u == 0.0:
a = 0.0
else:
a = self._F(i, n, u) * Nin1u
if Ni1n1u == 0.0:
b = 0.0
else:
b = self._G(i, n, u) * Ni1n1u
return a + b
def _F(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n] - self._knots[i]
if denom == 0.0:
return 0.0
return (u - self._knots[i]) / denom
def _G(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n + 1] - self._knots[i]
if denom == 0:
return 0.0
return (self._knots[i + n + 1] - u) / denom
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NURBS:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def addKnot(self, knot: float) ->None:
self._knots.append(knot)
def pointCount(self) ->int:
return len(self._points)
def calculate(self, segments: int) ->List[complex]:
while len(self._weights) < len(self._points):
self._weights.append(1.0)
ret = []
for n in range(0, segments):
u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (
segments - 1)
nku = []
for m in range(0, len(self._points)):
nku.append(self._weights[m] * self._N(m, self._degree, u))
point = complex(0, 0)
denom = sum(nku)
for m in range(0, len(self._points)):
if nku[m] != 0.0 and denom != 0.0:
r_iku = nku[m] / denom
if r_iku != 0.0:
point += self._points[m] * r_iku
ret.append(point)
return ret
def _N(self, i: int, n: int, u: float) ->float:
if n == 0:
if self._knots[i] <= u <= self._knots[i + 1]:
return 1
return 0
else:
Nin1u = self._N(i, n - 1, u)
Ni1n1u = self._N(i + 1, n - 1, u)
if Nin1u == 0.0:
a = 0.0
else:
a = self._F(i, n, u) * Nin1u
if Ni1n1u == 0.0:
b = 0.0
else:
b = self._G(i, n, u) * Ni1n1u
return a + b
def _F(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n] - self._knots[i]
if denom == 0.0:
return 0.0
return (u - self._knots[i]) / denom
def _G(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n + 1] - self._knots[i]
if denom == 0:
return 0.0
return (self._knots[i + n + 1] - u) / denom
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NURBS:
def __init__(self, degree: int) ->None:
self._degree = degree
self._points = []
self._weights = []
self._knots = []
<|reserved_special_token_0|>
def addKnot(self, knot: float) ->None:
self._knots.append(knot)
def pointCount(self) ->int:
return len(self._points)
def calculate(self, segments: int) ->List[complex]:
while len(self._weights) < len(self._points):
self._weights.append(1.0)
ret = []
for n in range(0, segments):
u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (
segments - 1)
nku = []
for m in range(0, len(self._points)):
nku.append(self._weights[m] * self._N(m, self._degree, u))
point = complex(0, 0)
denom = sum(nku)
for m in range(0, len(self._points)):
if nku[m] != 0.0 and denom != 0.0:
r_iku = nku[m] / denom
if r_iku != 0.0:
point += self._points[m] * r_iku
ret.append(point)
return ret
def _N(self, i: int, n: int, u: float) ->float:
if n == 0:
if self._knots[i] <= u <= self._knots[i + 1]:
return 1
return 0
else:
Nin1u = self._N(i, n - 1, u)
Ni1n1u = self._N(i + 1, n - 1, u)
if Nin1u == 0.0:
a = 0.0
else:
a = self._F(i, n, u) * Nin1u
if Ni1n1u == 0.0:
b = 0.0
else:
b = self._G(i, n, u) * Ni1n1u
return a + b
def _F(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n] - self._knots[i]
if denom == 0.0:
return 0.0
return (u - self._knots[i]) / denom
def _G(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n + 1] - self._knots[i]
if denom == 0:
return 0.0
return (self._knots[i + n + 1] - u) / denom
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NURBS:
def __init__(self, degree: int) ->None:
self._degree = degree
self._points = []
self._weights = []
self._knots = []
def addPoint(self, p: complex) ->None:
self._points.append(p)
def addKnot(self, knot: float) ->None:
self._knots.append(knot)
def pointCount(self) ->int:
return len(self._points)
def calculate(self, segments: int) ->List[complex]:
while len(self._weights) < len(self._points):
self._weights.append(1.0)
ret = []
for n in range(0, segments):
u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (
segments - 1)
nku = []
for m in range(0, len(self._points)):
nku.append(self._weights[m] * self._N(m, self._degree, u))
point = complex(0, 0)
denom = sum(nku)
for m in range(0, len(self._points)):
if nku[m] != 0.0 and denom != 0.0:
r_iku = nku[m] / denom
if r_iku != 0.0:
point += self._points[m] * r_iku
ret.append(point)
return ret
def _N(self, i: int, n: int, u: float) ->float:
if n == 0:
if self._knots[i] <= u <= self._knots[i + 1]:
return 1
return 0
else:
Nin1u = self._N(i, n - 1, u)
Ni1n1u = self._N(i + 1, n - 1, u)
if Nin1u == 0.0:
a = 0.0
else:
a = self._F(i, n, u) * Nin1u
if Ni1n1u == 0.0:
b = 0.0
else:
b = self._G(i, n, u) * Ni1n1u
return a + b
def _F(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n] - self._knots[i]
if denom == 0.0:
return 0.0
return (u - self._knots[i]) / denom
def _G(self, i: int, n: int, u: float) ->float:
denom = self._knots[i + n + 1] - self._knots[i]
if denom == 0:
return 0.0
return (self._knots[i + n + 1] - u) / denom
<|reserved_special_token_1|>
from typing import List
class NURBS:
def __init__(self, degree: int) -> None:
self._degree = degree
self._points = [] # type: List[complex]
self._weights = [] # type: List[float]
self._knots = [] # type: List[float]
def addPoint(self, p: complex) -> None:
self._points.append(p)
def addKnot(self, knot: float) -> None:
self._knots.append(knot)
def pointCount(self) -> int:
return len(self._points)
def calculate(self, segments: int) -> List[complex]:
while len(self._weights) < len(self._points):
self._weights.append(1.0)
ret = []
for n in range(0, segments):
u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (segments - 1)
nku = []
for m in range(0, len(self._points)):
nku.append(self._weights[m] * self._N(m, self._degree, u))
point = complex(0, 0)
denom = sum(nku)
for m in range(0, len(self._points)):
if nku[m] != 0.0 and denom != 0.0:
r_iku = nku[m] / denom
if r_iku != 0.0:
point += self._points[m] * r_iku
ret.append(point)
return ret
def _N(self, i: int, n: int, u: float) -> float:
if n == 0:
if self._knots[i] <= u <= self._knots[i+1]:
return 1
return 0
else:
Nin1u = self._N(i, n - 1, u)
Ni1n1u = self._N(i + 1, n - 1, u)
if Nin1u == 0.0:
a = 0.0
else:
a = self._F(i, n, u) * Nin1u
if Ni1n1u == 0.0:
b = 0.0
else:
b = self._G(i, n, u) * Ni1n1u
return a + b
def _F(self, i: int, n: int, u: float) -> float:
denom = self._knots[i + n] - self._knots[i]
if denom == 0.0:
return 0.0
return (u - self._knots[i]) / denom
def _G(self, i: int, n: int, u: float) -> float:
denom = self._knots[i + n + 1] - self._knots[i]
if denom == 0:
return 0.0
return (self._knots[i + n + 1] - u) / denom
|
flexible
|
{
"blob_id": "40b3cacf55f6c5056c3541d70d8b2c0e2cc7d01b",
"index": 2564,
"step-1": "<mask token>\n\n\nclass NURBS:\n <mask token>\n <mask token>\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n <mask token>\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-2": "<mask token>\n\n\nclass NURBS:\n <mask token>\n <mask token>\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n\n def calculate(self, segments: int) ->List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (\n segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-3": "<mask token>\n\n\nclass NURBS:\n\n def __init__(self, degree: int) ->None:\n self._degree = degree\n self._points = []\n self._weights = []\n self._knots = []\n <mask token>\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n\n def calculate(self, segments: int) ->List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (\n segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-4": "<mask token>\n\n\nclass NURBS:\n\n def __init__(self, degree: int) ->None:\n self._degree = degree\n self._points = []\n self._weights = []\n self._knots = []\n\n def addPoint(self, p: complex) ->None:\n self._points.append(p)\n\n def addKnot(self, knot: float) ->None:\n self._knots.append(knot)\n\n def pointCount(self) ->int:\n return len(self._points)\n\n def calculate(self, segments: int) ->List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (\n segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) ->float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i + 1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) ->float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-5": "from typing import List\n\n\nclass NURBS:\n def __init__(self, degree: int) -> None:\n self._degree = degree\n self._points = [] # type: List[complex]\n self._weights = [] # type: List[float]\n self._knots = [] # type: List[float]\n\n def addPoint(self, p: complex) -> None:\n self._points.append(p)\n\n def addKnot(self, knot: float) -> None:\n self._knots.append(knot)\n\n def pointCount(self) -> int:\n return len(self._points)\n\n def calculate(self, segments: int) -> List[complex]:\n while len(self._weights) < len(self._points):\n self._weights.append(1.0)\n\n ret = []\n for n in range(0, segments):\n u = self._knots[0] + (self._knots[-1] - self._knots[0]) * n / (segments - 1)\n nku = []\n for m in range(0, len(self._points)):\n nku.append(self._weights[m] * self._N(m, self._degree, u))\n\n point = complex(0, 0)\n denom = sum(nku)\n for m in range(0, len(self._points)):\n if nku[m] != 0.0 and denom != 0.0:\n r_iku = nku[m] / denom\n if r_iku != 0.0:\n point += self._points[m] * r_iku\n\n ret.append(point)\n return ret\n\n def _N(self, i: int, n: int, u: float) -> float:\n if n == 0:\n if self._knots[i] <= u <= self._knots[i+1]:\n return 1\n return 0\n else:\n Nin1u = self._N(i, n - 1, u)\n Ni1n1u = self._N(i + 1, n - 1, u)\n if Nin1u == 0.0:\n a = 0.0\n else:\n a = self._F(i, n, u) * Nin1u\n if Ni1n1u == 0.0:\n b = 0.0\n else:\n b = self._G(i, n, u) * Ni1n1u\n return a + b\n\n def _F(self, i: int, n: int, u: float) -> float:\n denom = self._knots[i + n] - self._knots[i]\n if denom == 0.0:\n return 0.0\n return (u - self._knots[i]) / denom\n\n def _G(self, i: int, n: int, u: float) -> float:\n denom = self._knots[i + n + 1] - self._knots[i]\n if denom == 0:\n return 0.0\n return (self._knots[i + n + 1] - u) / denom\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
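A quick sanity check of the class above: a degree-2 curve with three control points and a clamped knot vector, where the first and last sampled points should coincide with the end control points:

curve = NURBS(2)
for p in (complex(0, 0), complex(1, 2), complex(2, 0)):
    curve.addPoint(p)
for k in (0.0, 0.0, 0.0, 1.0, 1.0, 1.0):  # len(points) + degree + 1 knots
    curve.addKnot(k)
samples = curve.calculate(5)
print(samples[0], samples[-1])  # 0j (2+0j): clamped ends interpolate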
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 18:40:07 2021
@author: tomachache
"""
import numpy as np
from qiskit import *
# Various state preparation
def state_preparation(m, name, p):
# m : nb of qubits
# name : name of the state we want
# p : proba associated with noise
circ = QuantumCircuit(m, name = 'State prep')
if name == 'GHZ':
circ.h(0)
for k in range(1,m):
circ.cx(0,k)
elif name == 'noisy_GHZ_bitflip':
prob = np.random.rand(m)
circ.h(0)
for k in range(1,m):
circ.cx(0,k)
if prob[k] <= p: # flips each bit with proba p
circ.x(k)
if prob[0] <= p:
circ.x(0)
elif name == 'noisy_GHZ_QDC':
probas = [1 - 3*p/4, p/4, p/4, p/4]
gate_inds = np.random.choice(np.arange(4), size = m, p = probas)
circ.h(0)
for k in range(1,m):
circ.cx(0,k)
if gate_inds[k] == 1:
circ.x(k)
elif gate_inds[k] == 2:
circ.y(k)
elif gate_inds[k] == 3:
circ.z(k)
if gate_inds[0] == 1:
circ.x(0)
elif gate_inds[0] == 2:
circ.y(0)
elif gate_inds[0] == 3:
circ.z(0)
elif name == 'rigged_QDC': # QDC where 1st and 2nd qubits have different probas
probas_rigged = [1-p, p/2, p/2, 0]
probas_rigged2 = [1 - 29*p/30, 2*p/5, 2*p/5, p/6]
probas = [1 - 3*p/4, p/4, p/4, p/4]
gate_inds = np.random.choice(np.arange(4), size = m - 1, p = probas)
gate_inds_r = np.random.choice(np.arange(4), p = probas_rigged)
gate_inds_r2 = np.random.choice(np.arange(4), p = probas_rigged2)
circ.h(0)
circ.cx(0,1)
if gate_inds_r2 == 1:
circ.x(1)
elif gate_inds_r2 == 2:
circ.y(1)
elif gate_inds_r2 == 3:
circ.z(1)
for k in range(2,m):
circ.cx(0,k)
if gate_inds[k-1] == 1:
circ.x(k)
elif gate_inds[k-1] == 2:
circ.y(k)
elif gate_inds[k-1] == 3:
circ.z(k)
if gate_inds_r == 1:
circ.x(0)
elif gate_inds_r == 2:
circ.y(0)
elif gate_inds_r == 3:
circ.z(0)
else:
raise ValueError('Unrecognized name.')
return circ
|
normal
|
{
"blob_id": "6962bf99e3ecae473af54ded33fde09527cb82c0",
"index": 8284,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef state_preparation(m, name, p):\n circ = QuantumCircuit(m, name='State prep')\n if name == 'GHZ':\n circ.h(0)\n for k in range(1, m):\n circ.cx(0, k)\n elif name == 'noisy_GHZ_bitflip':\n prob = np.random.rand(m)\n circ.h(0)\n for k in range(1, m):\n circ.cx(0, k)\n if prob[k] <= p:\n circ.x(k)\n if prob[0] <= p:\n circ.x(0)\n elif name == 'noisy_GHZ_QDC':\n probas = [1 - 3 * p / 4, p / 4, p / 4, p / 4]\n gate_inds = np.random.choice(np.arange(4), size=m, p=probas)\n circ.h(0)\n for k in range(1, m):\n circ.cx(0, k)\n if gate_inds[k] == 1:\n circ.x(k)\n elif gate_inds[k] == 2:\n circ.y(k)\n elif gate_inds[k] == 3:\n circ.z(k)\n if gate_inds[0] == 1:\n circ.x(0)\n elif gate_inds[0] == 2:\n circ.y(0)\n elif gate_inds[0] == 3:\n circ.z(0)\n elif name == 'rigged_QDC':\n probas_rigged = [1 - p, p / 2, p / 2, 0]\n probas_rigged2 = [1 - 29 * p / 30, 2 * p / 5, 2 * p / 5, p / 6]\n probas = [1 - 3 * p / 4, p / 4, p / 4, p / 4]\n gate_inds = np.random.choice(np.arange(4), size=m - 1, p=probas)\n gate_inds_r = np.random.choice(np.arange(4), p=probas_rigged)\n gate_inds_r2 = np.random.choice(np.arange(4), p=probas_rigged2)\n circ.h(0)\n circ.cx(0, 1)\n if gate_inds_r2 == 1:\n circ.x(1)\n elif gate_inds_r2 == 2:\n circ.y(1)\n elif gate_inds_r2 == 3:\n circ.z(1)\n for k in range(2, m):\n circ.cx(0, k)\n if gate_inds[k - 1] == 1:\n circ.x(k)\n elif gate_inds[k - 1] == 2:\n circ.y(k)\n elif gate_inds[k - 1] == 3:\n circ.z(k)\n if gate_inds_r == 1:\n circ.x(0)\n elif gate_inds_r == 2:\n circ.y(0)\n elif gate_inds_r == 3:\n circ.z(0)\n else:\n raise ValueError('Unrecognized name.')\n return circ\n",
"step-3": "<mask token>\nimport numpy as np\nfrom qiskit import *\n\n\ndef state_preparation(m, name, p):\n circ = QuantumCircuit(m, name='State prep')\n if name == 'GHZ':\n circ.h(0)\n for k in range(1, m):\n circ.cx(0, k)\n elif name == 'noisy_GHZ_bitflip':\n prob = np.random.rand(m)\n circ.h(0)\n for k in range(1, m):\n circ.cx(0, k)\n if prob[k] <= p:\n circ.x(k)\n if prob[0] <= p:\n circ.x(0)\n elif name == 'noisy_GHZ_QDC':\n probas = [1 - 3 * p / 4, p / 4, p / 4, p / 4]\n gate_inds = np.random.choice(np.arange(4), size=m, p=probas)\n circ.h(0)\n for k in range(1, m):\n circ.cx(0, k)\n if gate_inds[k] == 1:\n circ.x(k)\n elif gate_inds[k] == 2:\n circ.y(k)\n elif gate_inds[k] == 3:\n circ.z(k)\n if gate_inds[0] == 1:\n circ.x(0)\n elif gate_inds[0] == 2:\n circ.y(0)\n elif gate_inds[0] == 3:\n circ.z(0)\n elif name == 'rigged_QDC':\n probas_rigged = [1 - p, p / 2, p / 2, 0]\n probas_rigged2 = [1 - 29 * p / 30, 2 * p / 5, 2 * p / 5, p / 6]\n probas = [1 - 3 * p / 4, p / 4, p / 4, p / 4]\n gate_inds = np.random.choice(np.arange(4), size=m - 1, p=probas)\n gate_inds_r = np.random.choice(np.arange(4), p=probas_rigged)\n gate_inds_r2 = np.random.choice(np.arange(4), p=probas_rigged2)\n circ.h(0)\n circ.cx(0, 1)\n if gate_inds_r2 == 1:\n circ.x(1)\n elif gate_inds_r2 == 2:\n circ.y(1)\n elif gate_inds_r2 == 3:\n circ.z(1)\n for k in range(2, m):\n circ.cx(0, k)\n if gate_inds[k - 1] == 1:\n circ.x(k)\n elif gate_inds[k - 1] == 2:\n circ.y(k)\n elif gate_inds[k - 1] == 3:\n circ.z(k)\n if gate_inds_r == 1:\n circ.x(0)\n elif gate_inds_r == 2:\n circ.y(0)\n elif gate_inds_r == 3:\n circ.z(0)\n else:\n raise ValueError('Unrecognized name.')\n return circ\n",
"step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 25 18:40:07 2021\n\n@author: tomachache\n\"\"\"\n\nimport numpy as np\n\nfrom qiskit import *\n\n\n# Various state preparation\ndef state_preparation(m, name, p): \n # m : nb of qubits \n # name : name of the state we want \n # p : proba associated with noise\n \n circ = QuantumCircuit(m, name = 'State prep')\n \n if name == 'GHZ':\n circ.h(0)\n for k in range(1,m):\n circ.cx(0,k)\n \n elif name == 'noisy_GHZ_bitflip':\n prob = np.random.rand(m)\n circ.h(0)\n for k in range(1,m):\n circ.cx(0,k)\n if prob[k] <= p: # flips each bit with proba p\n circ.x(k)\n if prob[0] <= p:\n circ.x(0)\n \n elif name == 'noisy_GHZ_QDC':\n probas = [1 - 3*p/4, p/4, p/4, p/4]\n gate_inds = np.random.choice(np.arange(4), size = m, p = probas)\n circ.h(0)\n for k in range(1,m):\n circ.cx(0,k)\n if gate_inds[k] == 1:\n circ.x(k)\n elif gate_inds[k] == 2:\n circ.y(k)\n elif gate_inds[k] == 3:\n circ.z(k)\n if gate_inds[0] == 1:\n circ.x(0)\n elif gate_inds[0] == 2:\n circ.y(0)\n elif gate_inds[0] == 3:\n circ.z(0)\n \n elif name == 'rigged_QDC': # QDC where 1st and 2nd qubits have different probas\n probas_rigged = [1-p, p/2, p/2, 0]\n probas_rigged2 = [1 - 29*p/30, 2*p/5, 2*p/5, p/6]\n probas = [1 - 3*p/4, p/4, p/4, p/4]\n gate_inds = np.random.choice(np.arange(4), size = m - 1, p = probas)\n gate_inds_r = np.random.choice(np.arange(4), p = probas_rigged)\n gate_inds_r2 = np.random.choice(np.arange(4), p = probas_rigged2)\n circ.h(0)\n circ.cx(0,1)\n if gate_inds_r2 == 1:\n circ.x(1)\n elif gate_inds_r2 == 2:\n circ.y(1)\n elif gate_inds_r2 == 3:\n circ.z(1)\n for k in range(2,m):\n circ.cx(0,k)\n if gate_inds[k-1] == 1:\n circ.x(k)\n elif gate_inds[k-1] == 2:\n circ.y(k)\n elif gate_inds[k-1] == 3:\n circ.z(k)\n if gate_inds_r == 1:\n circ.x(0)\n elif gate_inds_r == 2:\n circ.y(0)\n elif gate_inds_r == 3:\n circ.z(0)\n else:\n raise ValueError('Unrecognized name.')\n \n return circ\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def optical_flow_from_video():
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera():
cap = cv2.VideoCapture(0)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_frame = cv2.flip(old_frame, 1)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
<|reserved_special_token_0|>
def optical_flow_from_camera_farneback(flip=True, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,
5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_and_write_video():
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.
INTER_CUBIC)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3,
20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
i += 1
cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_farneback_and_write_video():
def crop(frame):
start_x = 800
end_x = start_x + 500
start_y = 1500
end_y = start_y + 500
return frame[start_x:end_x, start_y:end_y]
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
ret, frame1 = cap.read()
frame1 = crop(frame1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
i += 1
if i % 2 != 0:
continue
frame2 = crop(frame2)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale
=0.5, levels=3, winsize=7, iterations=3, poly_n=5,
poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def optical_flow_from_video():
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera():
cap = cv2.VideoCapture(0)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_frame = cv2.flip(old_frame, 1)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
<|reserved_special_token_0|>
def optical_flow_from_camera_farneback(flip=True, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,
5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_and_write_video():
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.
INTER_CUBIC)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3,
20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
i += 1
cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_farneback_and_write_video():
def crop(frame):
start_x = 800
end_x = start_x + 500
start_y = 1500
end_y = start_y + 500
return frame[start_x:end_x, start_y:end_y]
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
ret, frame1 = cap.read()
frame1 = crop(frame1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
i += 1
if i % 2 != 0:
continue
frame2 = crop(frame2)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale
=0.5, levels=3, winsize=7, iterations=3, poly_n=5,
poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')
width = 800
height = 500
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,
levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,
flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(100) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def optical_flow_from_video():
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera():
cap = cv2.VideoCapture(0)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_frame = cv2.flip(old_frame, 1)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera_farneback2():
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
ret, frame1 = cap.read()
frame1 = cv2.flip(frame1, 1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.flip(frame2, 1)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,
5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback(flip=True, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,
5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_and_write_video():
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.
INTER_CUBIC)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3,
20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
i += 1
cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_farneback_and_write_video():
def crop(frame):
start_x = 800
end_x = start_x + 500
start_y = 1500
end_y = start_y + 500
return frame[start_x:end_x, start_y:end_y]
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
ret, frame1 = cap.read()
frame1 = crop(frame1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
i += 1
if i % 2 != 0:
continue
frame2 = crop(frame2)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale
=0.5, levels=3, winsize=7, iterations=3, poly_n=5,
poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')
width = 800
height = 500
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,
levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,
flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(100) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def optical_flow_from_video():
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera():
cap = cv2.VideoCapture(0)
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,
blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.
TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
_, old_frame = cap.read()
old_frame = cv2.flip(old_frame, 1)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
None, **lk_params)
good_new = p1[st == 1]
good_old = p0[st == 1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 255 == ord('q'):
break
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera_farneback2():
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
ret, frame1 = cap.read()
frame1 = cv2.flip(frame1, 1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.flip(frame2, 1)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,
5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback(flip=True, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'
)
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,
5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_and_write_video():
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.
INTER_CUBIC)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3,
20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
i += 1
cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_farneback_and_write_video():
def crop(frame):
start_x = 800
end_x = start_x + 500
start_y = 1500
end_y = start_y + 500
return frame[start_x:end_x, start_y:end_y]
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
ret, frame1 = cap.read()
frame1 = crop(frame1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
i += 1
if i % 2 != 0:
continue
frame2 = crop(frame2)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale
=0.5, levels=3, winsize=7, iterations=3, poly_n=5,
poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)
            if cv2.waitKey(1) & 255 == ord('q'):
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
cap = cv2.VideoCapture(
'/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')
width = 800
height = 500
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.
INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=
cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,
levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,
flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
        if cv2.waitKey(100) & 255 == ord('q'):
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
if __name__ == '__main__':
optical_flow_farneback_and_write_video()
pass
<|reserved_special_token_1|>
import numpy as np
import cv2
def optical_flow_from_video():
cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
    # Parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow
    # maxLevel is the number of image-pyramid levels to use
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Generate random colors for drawing the tracks
color = np.random.randint(0, 255, (100, 3))
    # Read the first frame and detect corners in it
_, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing the corners' flow tracks
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st == 1]
good_old = p0[st == 1]
            # Draw the corner tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 0xff == ord("q"):
break
            # Update the previous frame and the corner positions
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera():
cap = cv2.VideoCapture(0)
    # Parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100, qualityLevel=0.3,
minDistance=7, blockSize=7)
    # Parameters for Lucas-Kanade optical flow
    # maxLevel is the number of image-pyramid levels to use
lk_params = dict(winSize=(15, 15), maxLevel=2,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Generate random colors for drawing the tracks
color = np.random.randint(0, 255, (100, 3))
    # Read the first frame and detect corners in it
_, old_frame = cap.read()
old_frame = cv2.flip(old_frame, 1)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing the corners' flow tracks
mask = np.zeros_like(old_frame)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
if ret:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Compute the new positions of the tracked corners
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st == 1]
good_old = p0[st == 1]
            # Draw the corner tracks
for i, (new, old) in enumerate(zip(good_new, good_old)):
a, b = new.ravel()
c, d = old.ravel()
mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)
cv2.imshow('frame', img)
if cv2.waitKey(30) & 0xff == ord("q"):
break
            # Update the previous frame and the corner positions
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1, 1, 2)
else:
break
pass
cv2.destroyAllWindows()
cap.release()
pass
def optical_flow_from_camera_farneback2():
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
ret, frame1 = cap.read()
frame1 = cv2.flip(frame1, 1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.flip(frame2, 1)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
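        # Positional args of cv2.calcOpticalFlowFarneback below map to
        # (pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags).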
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 1)
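        # Visualize the dense flow in HSV: direction -> hue, magnitude -> value.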
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
if cv2.waitKey(1) & 0xff == "q":
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback(flip=True, resize=True):
# cap = cv2.VideoCapture('test.mp4')
# cap = cv2.VideoCapture('test2.ts')
cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi")
# cap = cv2.VideoCapture(0)
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
if cv2.waitKey(1) & 0xff == "q":
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_and_write_video():
# cap = cv2.VideoCapture('eccv.avi')
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
width = 640
height = 480
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
i += 1
cv2.imwrite("{}/{}.jpg".format("test2", str(i)), result)
if cv2.waitKey(1) & 0xff == "q":
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_farneback_and_write_video():
def crop(frame):
# start_x = 1400
# end_x = start_x + 600
# start_y = 100
# end_y = start_y + 700
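        # NB: the first slice indexes rows and the second indexes columns,
        # so this returns rows 800:1300 x columns 1500:2000 (a 500x500 patch).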
start_x = 800
end_x = start_x + 500
start_y = 1500
end_y = start_y + 500
return frame[start_x:end_x, start_y: end_y]
cap = cv2.VideoCapture('./yaogan/chen_1.mp4')
ret, frame1 = cap.read()
frame1 = crop(frame1)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
i = 0
while True:
try:
ret, frame2 = cap.read()
i += 1
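            # Process only every second frame to halve the workload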
if i % 2 != 0:
continue
frame2 = crop(frame2)
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3,
winsize=7, iterations=3, poly_n=5, poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
cv2.imwrite("{}/{}.jpg".format("test2", str(i // 3)), result)
if cv2.waitKey(1) & 0xff == "q":
break
prvs = next
except Exception:
break
pass
cap.release()
cv2.destroyAllWindows()
pass
def optical_flow_from_camera_farneback_2(flip=False, resize=True):
# cap = cv2.VideoCapture('test.mp4')
# cap = cv2.VideoCapture('test2.ts')
cap = cv2.VideoCapture("/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi")
# cap = cv2.VideoCapture(0)
width = 800
height = 500
cap.set(3, width)
cap.set(4, height)
ret, frame1 = cap.read()
if flip:
frame1 = cv2.flip(frame1, 1)
if resize:
frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255
while True:
try:
ret, frame2 = cap.read()
if flip:
frame2 = cv2.flip(frame2, 1)
if resize:
frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)
cv2.imshow('frame1', frame2)
except Exception:
break
pass
next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3, winsize=8,
iterations=5, poly_n=5, poly_sigma=1.2, flags=1)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imshow('frame2', rgb)
result = np.concatenate((frame2, rgb), axis=1)
cv2.imshow('result', result)
if cv2.waitKey(100) & 0xff == "q":
break
prvs = next
pass
cap.release()
cv2.destroyAllWindows()
pass
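

# --- Hedged addendum (not in the original script): a minimal sketch that
# stitches the numbered JPEG frames which the *_write_video functions dump
# into "test2/" back into one video file. The directory name, codec and fps
# are assumptions; VideoWriter, VideoWriter_fourcc and imread are standard
# OpenCV calls.
def frames_to_video(frame_dir="test2", out_path="flow.avi", fps=15):
    import glob
    import os
    paths = glob.glob(os.path.join(frame_dir, "*.jpg"))
    # the frames were written as "<index>.jpg", so sort numerically
    paths.sort(key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))
    if not paths:
        return
    h, w = cv2.imread(paths[0]).shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"XVID"), fps, (w, h))
    for p in paths:
        writer.write(cv2.imread(p))
    writer.release()
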
if __name__ == '__main__':
optical_flow_farneback_and_write_video()
pass
|
flexible
|
{
"blob_id": "ae0547aa1af2d4dd73bb60154574e64e74107a58",
"index": 4062,
"step-1": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\n<mask token>\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 
0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\n<mask token>\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 
0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = 
cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef 
optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 
'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7,\n blockSize=7)\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.\n TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n color = np.random.randint(0, 255, (100, 3))\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n mask = np.zeros_like(old_frame)\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,\n None, **lk_params)\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 255 == ord('q'):\n break\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n pass\n cv2.destroyAllWindows()\n cap.release()\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n cap.set(3, 640)\n cap.set(4, 480)\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef 
optical_flow_from_camera_farneback(flip=True, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi'\n )\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3,\n 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.\n INTER_CUBIC)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, \n 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n i += 1\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i)), result)\n if cv2.waitKey(1) & 255 == 'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y:end_y]\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while True:\n try:\n ret, frame2 = cap.read()\n i += 1\n if i % 2 != 0:\n continue\n frame2 = crop(frame2)\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale\n =0.5, levels=3, winsize=7, iterations=3, poly_n=5,\n poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n cv2.imwrite('{}/{}.jpg'.format('test2', str(i // 3)), result)\n if cv2.waitKey(1) & 255 == 
'q':\n break\n prvs = next\n except Exception:\n break\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n cap = cv2.VideoCapture(\n '/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi')\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.\n INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=\n cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5,\n levels=3, winsize=8, iterations=5, poly_n=5, poly_sigma=1.2,\n flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n cv2.imshow('frame2', rgb)\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n if cv2.waitKey(100) & 255 == 'q':\n break\n prvs = next\n pass\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\nif __name__ == '__main__':\n optical_flow_farneback_and_write_video()\n pass\n",
"step-5": "import numpy as np\nimport cv2\n\n\ndef optical_flow_from_video():\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi\")\n\n # 设置 ShiTomasi 角点检测的参数\n feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)\n # 设置 lucas kanade 光流场的参数\n # maxLevel 为使用图像金字塔的层数\n lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # 产生随机的颜色值\n color = np.random.randint(0, 255, (100, 3))\n\n # 获取第一帧,并寻找其中的角点\n _, old_frame = cap.read()\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n # 创建一个掩膜为了后面绘制角点的光流轨迹\n mask = np.zeros_like(old_frame)\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 计算能够获取的角点的新位置\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # 绘制角点的轨迹\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xff == ord(\"q\"):\n break\n\n # 更新当前帧和当前角点的位置\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n\n pass\n\n cv2.destroyAllWindows()\n cap.release()\n\n pass\n\n\ndef optical_flow_from_camera():\n cap = cv2.VideoCapture(0)\n\n # 设置 ShiTomasi 角点检测的参数\n feature_params = dict(maxCorners=100, qualityLevel=0.3,\n minDistance=7, blockSize=7)\n # 设置 lucas kanade 光流场的参数\n # maxLevel 为使用图像金字塔的层数\n lk_params = dict(winSize=(15, 15), maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n # 产生随机的颜色值\n color = np.random.randint(0, 255, (100, 3))\n\n # 获取第一帧,并寻找其中的角点\n _, old_frame = cap.read()\n old_frame = cv2.flip(old_frame, 1)\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\n p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n # 创建一个掩膜为了后面绘制角点的光流轨迹\n mask = np.zeros_like(old_frame)\n\n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n if ret:\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 计算能够获取的角点的新位置\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n # 绘制角点的轨迹\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n if cv2.waitKey(30) & 0xff == ord(\"q\"):\n break\n\n # 更新当前帧和当前角点的位置\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n else:\n break\n\n pass\n\n cv2.destroyAllWindows()\n cap.release()\n\n pass\n\n\ndef optical_flow_from_camera_farneback2():\n cap = cv2.VideoCapture(0)\n\n cap.set(3, 640)\n cap.set(4, 480)\n\n ret, frame1 = cap.read()\n frame1 = cv2.flip(frame1, 1)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.flip(frame2, 1)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, 
None, 0.5, 3, 15, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\ndef optical_flow_from_camera_farneback(flip=True, resize=True):\n # cap = cv2.VideoCapture('test.mp4')\n # cap = cv2.VideoCapture('test2.ts')\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/Avenue_dataset/training_videos/01.avi\")\n # cap = cv2.VideoCapture(0)\n\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n cv2.imshow('frame2', rgb)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\ndef optical_flow_from_camera_farneback_and_write_video():\n # cap = cv2.VideoCapture('eccv.avi')\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n\n width = 640\n height = 480\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n i = 0\n\n while True:\n try:\n ret, frame2 = cap.read()\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 20, 3, 5, 1.2, 1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n i += 1\n cv2.imwrite(\"{}/{}.jpg\".format(\"test2\", str(i)), result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n except Exception:\n break\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_farneback_and_write_video():\n\n def crop(frame):\n # start_x = 1400\n # end_x = start_x + 600\n # start_y = 100\n # end_y = start_y + 700\n start_x = 800\n end_x = start_x + 500\n start_y = 1500\n end_y = start_y + 500\n return frame[start_x:end_x, start_y: end_y]\n\n cap = cv2.VideoCapture('./yaogan/chen_1.mp4')\n\n ret, frame1 = cap.read()\n frame1 = crop(frame1)\n\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = 
np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n i = 0\n\n while True:\n try:\n ret, frame2 = cap.read()\n\n i += 1\n if i % 2 != 0:\n continue\n\n frame2 = crop(frame2)\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3,\n winsize=7, iterations=3, poly_n=5, poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n cv2.imwrite(\"{}/{}.jpg\".format(\"test2\", str(i // 3)), result)\n\n if cv2.waitKey(1) & 0xff == \"q\":\n break\n prvs = next\n except Exception:\n break\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n pass\n\n\ndef optical_flow_from_camera_farneback_2(flip=False, resize=True):\n # cap = cv2.VideoCapture('test.mp4')\n # cap = cv2.VideoCapture('test2.ts')\n cap = cv2.VideoCapture(\"/home/ubuntu/data1.5TB/异常dataset/ShanghaiTech/train/01_001.avi\")\n # cap = cv2.VideoCapture(0)\n\n width = 800\n height = 500\n cap.set(3, width)\n cap.set(4, height)\n\n ret, frame1 = cap.read()\n if flip:\n frame1 = cv2.flip(frame1, 1)\n if resize:\n frame1 = cv2.resize(frame1, (width, height), interpolation=cv2.INTER_CUBIC)\n prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n\n while True:\n try:\n ret, frame2 = cap.read()\n if flip:\n frame2 = cv2.flip(frame2, 1)\n if resize:\n frame2 = cv2.resize(frame2, (width, height), interpolation=cv2.INTER_CUBIC)\n cv2.imshow('frame1', frame2)\n except Exception:\n break\n pass\n\n next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=3, winsize=8,\n iterations=5, poly_n=5, poly_sigma=1.2, flags=1)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n cv2.imshow('frame2', rgb)\n\n result = np.concatenate((frame2, rgb), axis=1)\n cv2.imshow('result', result)\n\n if cv2.waitKey(100) & 0xff == \"q\":\n break\n prvs = next\n pass\n\n cap.release()\n cv2.destroyAllWindows()\n\n pass\n\n\nif __name__ == '__main__':\n optical_flow_farneback_and_write_video()\n pass\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(argv) != 3:
print('Usage: python dna.py data.csv sequence.txt')
sys.exit()
with open(argv[1], 'r') as csv_file:
datafile = csv.reader(csv_file)
line_count = 0
for row in datafile:
datasave.insert(line_count, row)
line_count += 1
<|reserved_special_token_0|>
def STR(x):
ABC = ['AGATC', 'TTTTTTCT', 'AATG', 'TCTAG', 'GATA', 'TATC', 'GAAA', 'TCTG'
]
DEF = ['AGATC', 'AATG', 'TATC']
A = ABC[x]
if argv[1] == 'databases/large.csv':
A = ABC[x]
elif argv[1] == 'databases/small.csv':
A = DEF[x]
return A
<|reserved_special_token_0|>
for i in range(rowlength):
newcount = 0
STR1 = STR(i)
while True:
        found = re.findall(STR1 * newcount, seqfile2)
        if found == []:
countvector.append(newcount - 1)
break
else:
newcount += 1
<|reserved_special_token_0|>
for row in datasave:
indexcount = 0
truecount = 0
for i in range(rowlength):
if search_list[i] in datasave[rowcount]:
truecount += 1
if truecount == rowlength:
rowplacement = rowcount
print(datasave[rowplacement][0])
break
indexcount += 1
rowcount += 1
if truecount != rowlength and rowplacement == 0:
print('No match')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
datasave = []
if len(argv) != 3:
print('Usage: python dna.py data.csv sequence.txt')
sys.exit()
with open(argv[1], 'r') as csv_file:
datafile = csv.reader(csv_file)
line_count = 0
for row in datafile:
datasave.insert(line_count, row)
line_count += 1
rowlength = len(datasave[0]) - 1
seqfile = open(argv[2], 'r')
countvector = []
def STR(x):
ABC = ['AGATC', 'TTTTTTCT', 'AATG', 'TCTAG', 'GATA', 'TATC', 'GAAA', 'TCTG'
]
DEF = ['AGATC', 'AATG', 'TATC']
A = ABC[x]
if argv[1] == 'databases/large.csv':
A = ABC[x]
elif argv[1] == 'databases/small.csv':
A = DEF[x]
return A
seqfile2 = seqfile.read()
for i in range(rowlength):
newcount = 0
STR1 = STR(i)
while True:
        found = re.findall(STR1 * newcount, seqfile2)
        if found == []:
countvector.append(newcount - 1)
break
else:
newcount += 1
countvector = str(countvector)[1:-1]
countvector1 = countvector.replace(',', '')
search_list = countvector1.split(' ')
rowcount = 0
rowplacement = 0
for row in datasave:
indexcount = 0
truecount = 0
for i in range(rowlength):
if search_list[i] in datasave[rowcount]:
truecount += 1
if truecount == rowlength:
rowplacement = rowcount
print(datasave[rowplacement][0])
break
indexcount += 1
rowcount += 1
if truecount != rowlength and rowplacement == 0:
print('No match')
<|reserved_special_token_1|>
import csv
from sys import argv
import re
import sys
datasave = []
if len(argv) != 3:
print('Usage: python dna.py data.csv sequence.txt')
sys.exit()
with open(argv[1], 'r') as csv_file:
datafile = csv.reader(csv_file)
line_count = 0
for row in datafile:
datasave.insert(line_count, row)
line_count += 1
rowlength = len(datasave[0]) - 1
seqfile = open(argv[2], 'r')
countvector = []
def STR(x):
ABC = ['AGATC', 'TTTTTTCT', 'AATG', 'TCTAG', 'GATA', 'TATC', 'GAAA', 'TCTG'
]
DEF = ['AGATC', 'AATG', 'TATC']
A = ABC[x]
if argv[1] == 'databases/large.csv':
A = ABC[x]
elif argv[1] == 'databases/small.csv':
A = DEF[x]
return A
seqfile2 = seqfile.read()
for i in range(rowlength):
newcount = 0
STR1 = STR(i)
while True:
        found = re.findall(STR1 * newcount, seqfile2)
        if found == []:
countvector.append(newcount - 1)
break
else:
newcount += 1
countvector = str(countvector)[1:-1]
countvector1 = countvector.replace(',', '')
search_list = countvector1.split(' ')
rowcount = 0
rowplacement = 0
for row in datasave:
indexcount = 0
truecount = 0
for i in range(rowlength):
if search_list[i] in datasave[rowcount]:
truecount += 1
if truecount == rowlength:
rowplacement = rowcount
print(datasave[rowplacement][0])
break
indexcount += 1
rowcount += 1
if truecount != rowlength and rowplacement == 0:
print('No match')
<|reserved_special_token_1|>
import csv
from sys import argv
import re
import sys
datasave=[]
if len(argv) != 3: # exit unless exactly two arguments were given
print('Usage: python dna.py data.csv sequence.txt')
sys.exit()
# open the CSV file and save every row into datasave
with open (argv[1],'r') as csv_file:
datafile = csv.reader(csv_file)
line_count = 0
for row in datafile:
datasave.insert(line_count, row)
line_count += 1
rowlength= len(datasave[0])-1
seqfile= open(argv[2],'r') # open the sequence file for reading
countvector=[]

def STR(x): # choose between the large and small database
ABC = ["AGATC", "TTTTTTCT", "AATG", "TCTAG", "GATA", "TATC", "GAAA", "TCTG"]
DEF =["AGATC", "AATG", "TATC"]
A = ABC[x]
if argv[1] == 'databases/large.csv':
A= ABC[x]
elif argv[1] == 'databases/small.csv':
A= DEF[x]
return A
seqfile2 = seqfile.read()
# Reminder: a count of x consecutive occurrences corresponds to x-1 repeats in the task description (2 occurrences = repeated once)
for i in range(rowlength):
newcount= 0
STR1= STR(i)
while True:
        found = re.findall(STR1*newcount, seqfile2) # search once and reuse the result
        if found == []:
countvector.append(newcount-1)
break
else:
newcount += 1
countvector= str(countvector)[1:-1] # convert the list of counts to a string and strip the brackets
countvector1= countvector.replace(',','') # drop the commas
search_list= countvector1.split(' ') # split back into a list of strings, matching how the database rows are stored
rowcount=0
rowplacement=0
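# Compare the computed STR counts against every database row; a row in which
# every count appears is reported as the match.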
for row in datasave:
indexcount=0
truecount=0
for i in range(rowlength):
if search_list[i] in datasave[rowcount]:
            truecount+=1 # count how many STR totals of this row match
    if truecount == rowlength: # every STR count matched, so this row identifies the person
        rowplacement=rowcount
        print(datasave[rowplacement][0])
        break # stop scanning once the match has been printed
indexcount+=1
rowcount+=1
if truecount != rowlength and rowplacement == 0:
print('No match')
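# Example invocation, matching the usage string above (databases/small.csv is
# one of the paths STR() checks; the sequence file name comes from the usage
# message):
#   python dna.py databases/small.csv sequence.txt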
|
flexible
|
{
"blob_id": "1eeb7a539f43e9fb013494e2aa0d81b4eab0ae1a",
"index": 9353,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(argv) is not 3:\n print('Usage: python dna.py data.csv sequence.txt')\n sys.exit()\nwith open(argv[1], 'r') as csv_file:\n datafile = csv.reader(csv_file)\n line_count = 0\n for row in datafile:\n datasave.insert(line_count, row)\n line_count += 1\n<mask token>\n\n\ndef STR(x):\n ABC = ['AGATC', 'TTTTTTCT', 'AATG', 'TCTAG', 'GATA', 'TATC', 'GAAA', 'TCTG'\n ]\n DEF = ['AGATC', 'AATG', 'TATC']\n A = ABC[x]\n if argv[1] == 'databases/large.csv':\n A = ABC[x]\n elif argv[1] == 'databases/small.csv':\n A = DEF[x]\n return A\n\n\n<mask token>\nfor i in range(rowlength):\n newcount = 0\n STR1 = STR(i)\n while True:\n Bfound = re.findall(STR1 * newcount, seqfile2)\n if re.findall(STR1 * newcount, seqfile2) == []:\n countvector.append(newcount - 1)\n break\n else:\n newcount += 1\n<mask token>\nfor row in datasave:\n indexcount = 0\n truecount = 0\n for i in range(rowlength):\n if search_list[i] in datasave[rowcount]:\n truecount += 1\n if truecount == rowlength:\n rowplacement = rowcount\n print(datasave[rowplacement][0])\n break\n indexcount += 1\n rowcount += 1\nif truecount is not rowlength and rowplacement == 0:\n print('No match')\n",
"step-3": "<mask token>\ndatasave = []\nif len(argv) is not 3:\n print('Usage: python dna.py data.csv sequence.txt')\n sys.exit()\nwith open(argv[1], 'r') as csv_file:\n datafile = csv.reader(csv_file)\n line_count = 0\n for row in datafile:\n datasave.insert(line_count, row)\n line_count += 1\nrowlength = len(datasave[0]) - 1\nseqfile = open(argv[2], 'r')\ncountvector = []\n\n\ndef STR(x):\n ABC = ['AGATC', 'TTTTTTCT', 'AATG', 'TCTAG', 'GATA', 'TATC', 'GAAA', 'TCTG'\n ]\n DEF = ['AGATC', 'AATG', 'TATC']\n A = ABC[x]\n if argv[1] == 'databases/large.csv':\n A = ABC[x]\n elif argv[1] == 'databases/small.csv':\n A = DEF[x]\n return A\n\n\nseqfile2 = seqfile.read()\nfor i in range(rowlength):\n newcount = 0\n STR1 = STR(i)\n while True:\n Bfound = re.findall(STR1 * newcount, seqfile2)\n if re.findall(STR1 * newcount, seqfile2) == []:\n countvector.append(newcount - 1)\n break\n else:\n newcount += 1\ncountvector = str(countvector)[1:-1]\ncountvector1 = countvector.replace(',', '')\nsearch_list = countvector1.split(' ')\nrowcount = 0\nrowplacement = 0\nfor row in datasave:\n indexcount = 0\n truecount = 0\n for i in range(rowlength):\n if search_list[i] in datasave[rowcount]:\n truecount += 1\n if truecount == rowlength:\n rowplacement = rowcount\n print(datasave[rowplacement][0])\n break\n indexcount += 1\n rowcount += 1\nif truecount is not rowlength and rowplacement == 0:\n print('No match')\n",
"step-4": "import csv\nfrom sys import argv\nimport re\nimport sys\ndatasave = []\nif len(argv) is not 3:\n print('Usage: python dna.py data.csv sequence.txt')\n sys.exit()\nwith open(argv[1], 'r') as csv_file:\n datafile = csv.reader(csv_file)\n line_count = 0\n for row in datafile:\n datasave.insert(line_count, row)\n line_count += 1\nrowlength = len(datasave[0]) - 1\nseqfile = open(argv[2], 'r')\ncountvector = []\n\n\ndef STR(x):\n ABC = ['AGATC', 'TTTTTTCT', 'AATG', 'TCTAG', 'GATA', 'TATC', 'GAAA', 'TCTG'\n ]\n DEF = ['AGATC', 'AATG', 'TATC']\n A = ABC[x]\n if argv[1] == 'databases/large.csv':\n A = ABC[x]\n elif argv[1] == 'databases/small.csv':\n A = DEF[x]\n return A\n\n\nseqfile2 = seqfile.read()\nfor i in range(rowlength):\n newcount = 0\n STR1 = STR(i)\n while True:\n Bfound = re.findall(STR1 * newcount, seqfile2)\n if re.findall(STR1 * newcount, seqfile2) == []:\n countvector.append(newcount - 1)\n break\n else:\n newcount += 1\ncountvector = str(countvector)[1:-1]\ncountvector1 = countvector.replace(',', '')\nsearch_list = countvector1.split(' ')\nrowcount = 0\nrowplacement = 0\nfor row in datasave:\n indexcount = 0\n truecount = 0\n for i in range(rowlength):\n if search_list[i] in datasave[rowcount]:\n truecount += 1\n if truecount == rowlength:\n rowplacement = rowcount\n print(datasave[rowplacement][0])\n break\n indexcount += 1\n rowcount += 1\nif truecount is not rowlength and rowplacement == 0:\n print('No match')\n",
"step-5": "import csv\nfrom sys import argv\nimport re\nimport sys\n\n\ndatasave=[]\n\nif len(argv) is not 3: #stop usage if not correct input\n print('Usage: python dna.py data.csv sequence.txt')\n sys.exit()\n\n#open CSV file and save\nwith open (argv[1],'r') as csv_file:\n datafile = csv.reader(csv_file)\n line_count = 0\n for row in datafile:\n datasave.insert(line_count, row)\n line_count += 1\n \nrowlength= len(datasave[0])-1\nseqfile= open(argv[2],'r') #read argv2\ncountvector=[]\n\ndef STR(x): #choose between large or small databse\n\n ABC = [\"AGATC\", \"TTTTTTCT\", \"AATG\", \"TCTAG\", \"GATA\", \"TATC\", \"GAAA\", \"TCTG\"]\n DEF =[\"AGATC\", \"AATG\", \"TATC\"]\n A = ABC[x]\n \n if argv[1] == 'databases/large.csv':\n A= ABC[x]\n elif argv[1] == 'databases/small.csv':\n A= DEF[x]\n return A\n\n\nseqfile2 = seqfile.read()\n\n#reminder x in count is repeated x-1 in task description , 2 occurence = repeated 1 times\n\n\nfor i in range(rowlength):\n newcount= 0\n STR1= STR(i)\n\n while True:\n Bfound = re.findall(STR1*newcount,seqfile2)\n if re.findall(STR1*newcount, seqfile2) == [] :\n countvector.append(newcount-1)\n break\n else:\n newcount += 1\n \n\ncountvector= str(countvector)[1:-1] #some formatting lines, converting first integers to string\ncountvector1= countvector.replace(',','') #removing , \nsearch_list= countvector1.split(' ') #splitting into list cuz the database i saved as list\n\nrowcount=0\nrowplacement=0\n\nfor row in datasave:\n indexcount=0\n truecount=0\n for i in range(rowlength):\n if search_list[i] in datasave[rowcount]:\n truecount+=1 #testing if index matches lists\n if truecount == rowlength: #matching all in the row will start this IF line\n rowplacement=rowcount\n print(datasave[rowplacement][0])\n \n break #this break doesnt work???????\n \n indexcount+=1\n rowcount+=1\n \nif truecount is not rowlength and rowplacement == 0 :\n print('No match') \n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import nox
@nox.session(python=["3.9", "3.8", "3.7", "3.6"], venv_backend="conda", venv_params=["--use-local"])
def test(session):
"""Add tests
"""
session.install()
session.run("pytest")
@nox.session(python=["3.9", "3.8", "3.7", "3.6"])
def lint(session):
"""Lint the code with flake8.
"""
session.install("flake8")
session.run("flake8", "")
|
normal
|
{
"blob_id": "9aecf297ed36784d69e2be6fada31f7c1ac37500",
"index": 4778,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@nox.session(python=['3.9', '3.8', '3.7', '3.6'], venv_backend='conda',\n venv_params=['--use-local'])\ndef test(session):\n \"\"\"Add tests\n \"\"\"\n session.install()\n session.run('pytest')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@nox.session(python=['3.9', '3.8', '3.7', '3.6'], venv_backend='conda',\n venv_params=['--use-local'])\ndef test(session):\n \"\"\"Add tests\n \"\"\"\n session.install()\n session.run('pytest')\n\n\n@nox.session(python=['3.9', '3.8', '3.7', '3.6'])\ndef lint(session):\n \"\"\"Lint the code with flake8.\n \"\"\"\n session.install('flake8')\n session.run('flake8', '')\n",
"step-4": "import nox\n\n\n@nox.session(python=['3.9', '3.8', '3.7', '3.6'], venv_backend='conda',\n venv_params=['--use-local'])\ndef test(session):\n \"\"\"Add tests\n \"\"\"\n session.install()\n session.run('pytest')\n\n\n@nox.session(python=['3.9', '3.8', '3.7', '3.6'])\ndef lint(session):\n \"\"\"Lint the code with flake8.\n \"\"\"\n session.install('flake8')\n session.run('flake8', '')\n",
"step-5": "import nox\n\n@nox.session(python=[\"3.9\", \"3.8\", \"3.7\", \"3.6\"], venv_backend=\"conda\", venv_params=[\"--use-local\"])\ndef test(session):\n \"\"\"Add tests\n \"\"\"\n session.install()\n session.run(\"pytest\")\n\n@nox.session(python=[\"3.9\", \"3.8\", \"3.7\", \"3.6\"])\ndef lint(session):\n \"\"\"Lint the code with flake8.\n \"\"\"\n session.install(\"flake8\")\n session.run(\"flake8\", \"\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Container(object):
<|reserved_special_token_0|>
def __init__(self, svc_cfg, containers_all=None):
self.cfg = svc_cfg
self.env = dict(self.cfg)
args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
'cluster', 'images_dir', 'path', 'service_name', 'host']
for arg in args_to_delete:
try:
del self.env[arg]
except KeyError:
pass
self.env['detach'] = self.cfg.get('detach', True)
self.docker_client = None
if containers_all is None:
docker_api = DockerApi(self.cfg['api_cfg'])
self.docker_api = docker_api.connect()
self.docker_client = self.docker_api.containers
self.info = self._get_info(containers_all)
def gather_api_methods(self, func):
return func.__code__.co_varnames
def create(self):
"""Returns a Container object"""
print('Creating container: {}'.format(self.cfg['name']))
create = self.docker_client.create(**self.env)
return create['id']
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def remove(self):
self.stop()
c = self.info.get('Container')
if c is not None:
print('Removing container: {}'.format(self.cfg['name']))
try:
c.remove()
except docker_errors.NotFound:
print('Container {} does not exist'.format(self.cfg['name']))
return True
def restart(self):
        # restart() is a method of the Container object, not of the containers collection
        c = self.info.get('Container')
        if c is not None:
            c.restart()
def return_shell(self, cmd):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True
)
for line in ret[1]:
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)
print(line[1])
def return_logs(self):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
for line in c.logs(stderr=True, stdout=True, timestamps
=False, stream=True):
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.logs(stderr=True, stdout=True, timestamps=False,
stream=False)
print(line)
def _get_info(self, containers_all=None):
info = {}
if containers_all is None:
containers_all = self.docker_client.list(all=True)
try:
container = [c for c in containers_all if c.name in self.cfg[
'service_name']][0]
api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
api = api.connect()
infos = api.inspect_container(container.id)
info['Container'] = container
info['Id'] = container.id
info['Ip'] = infos['NetworkSettings']['IPAddress']
info['State'] = container.status
except IndexError:
info = {}
return info
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Container(object):
<|reserved_special_token_0|>
def __init__(self, svc_cfg, containers_all=None):
self.cfg = svc_cfg
self.env = dict(self.cfg)
args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
'cluster', 'images_dir', 'path', 'service_name', 'host']
for arg in args_to_delete:
try:
del self.env[arg]
except KeyError:
pass
self.env['detach'] = self.cfg.get('detach', True)
self.docker_client = None
if containers_all is None:
docker_api = DockerApi(self.cfg['api_cfg'])
self.docker_api = docker_api.connect()
self.docker_client = self.docker_api.containers
self.info = self._get_info(containers_all)
def gather_api_methods(self, func):
return func.__code__.co_varnames
def create(self):
"""Returns a Container object"""
print('Creating container: {}'.format(self.cfg['name']))
create = self.docker_client.create(**self.env)
return create['id']
def start(self):
"""Returns a Container object"""
try:
print('Starting container: {}'.format(self.cfg['name']))
start = self.docker_client.run(**self.env)
except docker_errors.APIError as error:
print(error)
print('Container {} is already running'.format(self.cfg['name']))
return self.cfg['name']
return start
<|reserved_special_token_0|>
def remove(self):
self.stop()
c = self.info.get('Container')
if c is not None:
print('Removing container: {}'.format(self.cfg['name']))
try:
c.remove()
except docker_errors.NotFound:
print('Container {} does not exist'.format(self.cfg['name']))
return True
def restart(self):
        # restart() is a method of the Container object, not of the containers collection
        c = self.info.get('Container')
        if c is not None:
            c.restart()
def return_shell(self, cmd):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True
)
for line in ret[1]:
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)
print(line[1])
def return_logs(self):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
for line in c.logs(stderr=True, stdout=True, timestamps
=False, stream=True):
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.logs(stderr=True, stdout=True, timestamps=False,
stream=False)
print(line)
def _get_info(self, containers_all=None):
info = {}
if containers_all is None:
containers_all = self.docker_client.list(all=True)
try:
container = [c for c in containers_all if c.name in self.cfg[
'service_name']][0]
api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
api = api.connect()
infos = api.inspect_container(container.id)
info['Container'] = container
info['Id'] = container.id
info['Ip'] = infos['NetworkSettings']['IPAddress']
info['State'] = container.status
except IndexError:
info = {}
return info
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Container(object):
"""Instance a defined container
This class instance a Docker container depending on its
name and model definition.
The basics Docker methods are implemented as well as a
Shaddock's specific one that return the information of
the concerned container.
Shaddock keep no tracks of any Container ID and rely on no
databases. THe containers are retrieve from their names.
"""
def __init__(self, svc_cfg, containers_all=None):
self.cfg = svc_cfg
self.env = dict(self.cfg)
args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
'cluster', 'images_dir', 'path', 'service_name', 'host']
for arg in args_to_delete:
try:
del self.env[arg]
except KeyError:
pass
self.env['detach'] = self.cfg.get('detach', True)
self.docker_client = None
if containers_all is None:
docker_api = DockerApi(self.cfg['api_cfg'])
self.docker_api = docker_api.connect()
self.docker_client = self.docker_api.containers
self.info = self._get_info(containers_all)
def gather_api_methods(self, func):
return func.__code__.co_varnames
def create(self):
"""Returns a Container object"""
print('Creating container: {}'.format(self.cfg['name']))
create = self.docker_client.create(**self.env)
return create['id']
def start(self):
"""Returns a Container object"""
try:
print('Starting container: {}'.format(self.cfg['name']))
start = self.docker_client.run(**self.env)
except docker_errors.APIError as error:
print(error)
print('Container {} is already running'.format(self.cfg['name']))
return self.cfg['name']
return start
def stop(self):
c = self.info.get('Container')
if c is not None:
print('Stopping container: {}'.format(self.cfg['name']))
return c.stop()
def remove(self):
self.stop()
c = self.info.get('Container')
if c is not None:
print('Removing container: {}'.format(self.cfg['name']))
try:
c.remove()
except docker_errors.NotFound:
print('Container {} does not exist'.format(self.cfg['name']))
return True
def restart(self):
        # restart() is a method of the Container object, not of the containers collection
        c = self.info.get('Container')
        if c is not None:
            c.restart()
def return_shell(self, cmd):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True
)
for line in ret[1]:
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)
print(line[1])
def return_logs(self):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
for line in c.logs(stderr=True, stdout=True, timestamps
=False, stream=True):
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.logs(stderr=True, stdout=True, timestamps=False,
stream=False)
print(line)
def _get_info(self, containers_all=None):
info = {}
if containers_all is None:
containers_all = self.docker_client.list(all=True)
try:
container = [c for c in containers_all if c.name in self.cfg[
'service_name']][0]
api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
api = api.connect()
infos = api.inspect_container(container.id)
info['Container'] = container
info['Id'] = container.id
info['Ip'] = infos['NetworkSettings']['IPAddress']
info['State'] = container.status
except IndexError:
info = {}
return info
<|reserved_special_token_1|>
from shaddock.drivers.docker.api import DockerApi
from docker import errors as docker_errors
import sys
class Container(object):
"""Instance a defined container
This class instance a Docker container depending on its
name and model definition.
The basics Docker methods are implemented as well as a
Shaddock's specific one that return the information of
the concerned container.
Shaddock keep no tracks of any Container ID and rely on no
databases. THe containers are retrieve from their names.
"""
def __init__(self, svc_cfg, containers_all=None):
self.cfg = svc_cfg
self.env = dict(self.cfg)
args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
'cluster', 'images_dir', 'path', 'service_name', 'host']
for arg in args_to_delete:
try:
del self.env[arg]
except KeyError:
pass
self.env['detach'] = self.cfg.get('detach', True)
self.docker_client = None
if containers_all is None:
docker_api = DockerApi(self.cfg['api_cfg'])
self.docker_api = docker_api.connect()
self.docker_client = self.docker_api.containers
self.info = self._get_info(containers_all)
def gather_api_methods(self, func):
return func.__code__.co_varnames
def create(self):
"""Returns a Container object"""
print('Creating container: {}'.format(self.cfg['name']))
create = self.docker_client.create(**self.env)
return create['id']
def start(self):
"""Returns a Container object"""
try:
print('Starting container: {}'.format(self.cfg['name']))
start = self.docker_client.run(**self.env)
except docker_errors.APIError as error:
print(error)
print('Container {} is already running'.format(self.cfg['name']))
return self.cfg['name']
return start
def stop(self):
c = self.info.get('Container')
if c is not None:
print('Stopping container: {}'.format(self.cfg['name']))
return c.stop()
def remove(self):
self.stop()
c = self.info.get('Container')
if c is not None:
print('Removing container: {}'.format(self.cfg['name']))
try:
c.remove()
except docker_errors.NotFound:
print('Container {} does not exist'.format(self.cfg['name']))
return True
def restart(self):
        # restart() is a method of the Container object, not of the containers collection
        c = self.info.get('Container')
        if c is not None:
            c.restart()
def return_shell(self, cmd):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True
)
for line in ret[1]:
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)
print(line[1])
def return_logs(self):
if self.cfg['image'] is not None:
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
for line in c.logs(stderr=True, stdout=True, timestamps
=False, stream=True):
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.logs(stderr=True, stdout=True, timestamps=False,
stream=False)
print(line)
def _get_info(self, containers_all=None):
info = {}
if containers_all is None:
containers_all = self.docker_client.list(all=True)
try:
container = [c for c in containers_all if c.name in self.cfg[
'service_name']][0]
api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
api = api.connect()
infos = api.inspect_container(container.id)
info['Container'] = container
info['Id'] = container.id
info['Ip'] = infos['NetworkSettings']['IPAddress']
info['State'] = container.status
except IndexError:
info = {}
return info
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Thibaut Lapierre <git@epheo.eu>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from shaddock.drivers.docker.api import DockerApi
from docker import errors as docker_errors
import sys
class Container(object):
"""Instance a defined container
This class instance a Docker container depending on its
name and model definition.
The basics Docker methods are implemented as well as a
Shaddock's specific one that return the information of
the concerned container.
Shaddock keep no tracks of any Container ID and rely on no
databases. THe containers are retrieve from their names.
"""
def __init__(self, svc_cfg, containers_all=None):
self.cfg = svc_cfg
self.env = dict(self.cfg)
        # We may want to use func.__code__.co_varnames here to gather all
        # possible arguments of the Docker API, compare them with cfg,
        # and drop the hard-coded deletion list below.
args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',
'cluster', 'images_dir', 'path', 'service_name',
'host']
for arg in args_to_delete:
try:
del self.env[arg]
except KeyError:
pass
self.env['detach'] = self.cfg.get('detach', True)
self.docker_client = None
if containers_all is None:
docker_api = DockerApi(self.cfg['api_cfg'])
self.docker_api = docker_api.connect()
self.docker_client = self.docker_api.containers
self.info = self._get_info(containers_all)
def gather_api_methods(self, func):
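        # Introspection helper (not called by the methods below): returns the
        # variable names, including arguments, of a function's code object.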
return func.__code__.co_varnames
def create(self):
"""Returns a Container object"""
print('Creating container: {}'.format(self.cfg['name']))
create = self.docker_client.create(**self.env)
return create['id']
def start(self):
"""Returns a Container object"""
try:
print('Starting container: {}'.format(self.cfg['name']))
start = self.docker_client.run(**self.env)
except docker_errors.APIError as error:
print(error)
print('Container {} is already running'.format(self.cfg['name']))
return self.cfg['name']
return start
def stop(self):
c = self.info.get('Container')
if c is not None:
print('Stopping container: {}'.format(self.cfg['name']))
return c.stop()
def remove(self):
self.stop()
c = self.info.get('Container')
if c is not None:
print('Removing container: {}'.format(self.cfg['name']))
try:
c.remove()
except docker_errors.NotFound:
print('Container {} does not exist'.format(self.cfg['name']))
return True
def restart(self):
        # restart() is a method of the Container object, not of the containers collection
        c = self.info.get('Container')
        if c is not None:
            c.restart()
def return_shell(self, cmd):
if self.cfg['image'] is not None:
# "Fix" in order to not use the stream generator in Python2
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
ret = c.exec_run(cmd,
stderr=True,
stdout=True,
stream=True,
)
for line in ret[1]:
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.exec_run(cmd,
stderr=True,
stdout=True,
stream=False)
print(line[1])
def return_logs(self):
if self.cfg['image'] is not None:
# "Fix" in order to not use the stream generator in Python2
c = self.info.get('Container')
if sys.version_info > (3, 0):
try:
for line in c.logs(stderr=True,
stdout=True,
timestamps=False,
stream=True,
):
print(line.decode('utf-8').rstrip())
except (KeyboardInterrupt, SystemExit):
return True
else:
line = c.logs(stderr=True,
stdout=True,
timestamps=False,
stream=False)
print(line)
def _get_info(self, containers_all=None):
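        # Look the container up by name among all known containers, then use
        # the low-level API client to read its network settings.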
info = {}
if containers_all is None:
containers_all = self.docker_client.list(all=True)
try:
container = [c for c in containers_all
if (c.name in self.cfg['service_name'])][0]
api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')
api = api.connect()
infos = api.inspect_container(container.id)
info['Container'] = container
info['Id'] = container.id
info['Ip'] = infos['NetworkSettings']['IPAddress']
info['State'] = container.status
except IndexError:
            # No container with this name exists
info = {}
return info
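# Minimal usage sketch (the dict keys mirror what this class reads; the
# values and the api_cfg contents are assumptions):
#   cfg = {'name': 'web', 'service_name': 'web', 'image': 'nginx:latest',
#          'api_cfg': {}}
#   Container(cfg).start()
#   print(Container(cfg).info)  # info is gathered when the object is built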
|
flexible
|
{
"blob_id": "c2c1194ed23adda015b23897888d1a4cc11423d5",
"index": 5074,
"step-1": "<mask token>\n\n\nclass Container(object):\n <mask token>\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n <mask token>\n <mask token>\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-2": "<mask token>\n\n\nclass Container(object):\n <mask token>\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n return start\n <mask token>\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-3": "<mask token>\n\n\nclass Container(object):\n \"\"\"Instance a defined container\n\n This class instance a Docker container depending on its\n name and model definition.\n The basics Docker methods are implemented as well as a\n Shaddock's specific one that return the information of\n the concerned container.\n\n Shaddock keep no tracks of any Container ID and rely on no\n databases. THe containers are retrieve from their names.\n \"\"\"\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n return start\n\n def stop(self):\n c = self.info.get('Container')\n if c is not None:\n print('Stopping container: {}'.format(self.cfg['name']))\n return c.stop()\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-4": "from shaddock.drivers.docker.api import DockerApi\nfrom docker import errors as docker_errors\nimport sys\n\n\nclass Container(object):\n \"\"\"Instance a defined container\n\n This class instance a Docker container depending on its\n name and model definition.\n The basics Docker methods are implemented as well as a\n Shaddock's specific one that return the information of\n the concerned container.\n\n Shaddock keep no tracks of any Container ID and rely on no\n databases. THe containers are retrieve from their names.\n \"\"\"\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name', 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n return start\n\n def stop(self):\n c = self.info.get('Container')\n if c is not None:\n print('Stopping container: {}'.format(self.cfg['name']))\n return c.stop()\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd, stderr=True, stdout=True, stream=True\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.exec_run(cmd, stderr=True, stdout=True, stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True, stdout=True, timestamps\n =False, stream=True):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True, stdout=True, timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all if c.name in self.cfg[\n 'service_name']][0]\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n 
info['State'] = container.status\n except IndexError:\n info = {}\n return info\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2014 Thibaut Lapierre <git@epheo.eu>. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom shaddock.drivers.docker.api import DockerApi\nfrom docker import errors as docker_errors\nimport sys\n\n\nclass Container(object):\n \"\"\"Instance a defined container\n\n This class instance a Docker container depending on its\n name and model definition.\n The basics Docker methods are implemented as well as a\n Shaddock's specific one that return the information of\n the concerned container.\n\n Shaddock keep no tracks of any Container ID and rely on no\n databases. THe containers are retrieve from their names.\n \"\"\"\n\n def __init__(self, svc_cfg, containers_all=None):\n self.cfg = svc_cfg\n self.env = dict(self.cfg)\n # we may want to use func.__code__.co_varnames here to gather all\n # possible arguments of the docker api and compare them with cfg\n # and delete the crapy hack of the next 8 lines.\n args_to_delete = ['priority', 'depends-on', 'detach', 'api_cfg',\n 'cluster', 'images_dir', 'path', 'service_name',\n 'host']\n for arg in args_to_delete:\n try:\n del self.env[arg]\n except KeyError:\n pass\n self.env['detach'] = self.cfg.get('detach', True)\n self.docker_client = None\n if containers_all is None:\n docker_api = DockerApi(self.cfg['api_cfg'])\n self.docker_api = docker_api.connect()\n self.docker_client = self.docker_api.containers\n self.info = self._get_info(containers_all)\n\n def gather_api_methods(self, func):\n return func.__code__.co_varnames\n\n def create(self):\n \"\"\"Returns a Container object\"\"\"\n print('Creating container: {}'.format(self.cfg['name']))\n create = self.docker_client.create(**self.env)\n return create['id']\n\n def start(self):\n \"\"\"Returns a Container object\"\"\"\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n\n return start\n\n def stop(self):\n c = self.info.get('Container')\n if c is not None:\n print('Stopping container: {}'.format(self.cfg['name']))\n return c.stop()\n\n def remove(self):\n self.stop()\n c = self.info.get('Container')\n if c is not None:\n print('Removing container: {}'.format(self.cfg['name']))\n try:\n c.remove()\n except docker_errors.NotFound:\n print('Container {} does not exist'.format(self.cfg['name']))\n return True\n\n def restart(self):\n self.docker_client.restart(self.info['Id'])\n\n def return_shell(self, cmd):\n if self.cfg['image'] is not None:\n # \"Fix\" in order to not use the stream generator in Python2\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n ret = c.exec_run(cmd,\n stderr=True,\n stdout=True,\n stream=True,\n )\n for line in ret[1]:\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = 
c.exec_run(cmd,\n stderr=True,\n stdout=True,\n stream=False)\n print(line[1])\n\n def return_logs(self):\n if self.cfg['image'] is not None:\n # \"Fix\" in order to not use the stream generator in Python2\n c = self.info.get('Container')\n if sys.version_info > (3, 0):\n try:\n for line in c.logs(stderr=True,\n stdout=True,\n timestamps=False,\n stream=True,\n ):\n print(line.decode('utf-8').rstrip())\n except (KeyboardInterrupt, SystemExit):\n return True\n else:\n line = c.logs(stderr=True,\n stdout=True,\n timestamps=False,\n stream=False)\n print(line)\n\n def _get_info(self, containers_all=None):\n info = {}\n if containers_all is None:\n containers_all = self.docker_client.list(all=True)\n try:\n container = [c for c in containers_all\n if (c.name in self.cfg['service_name'])][0]\n\n api = DockerApi(self.cfg['api_cfg'], 'lowlevelapi')\n api = api.connect()\n infos = api.inspect_container(container.id)\n\n info['Container'] = container\n info['Id'] = container.id\n info['Ip'] = infos['NetworkSettings']['IPAddress']\n info['State'] = container.status\n\n except IndexError:\n # Container is not running\n info = {}\n return info\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
class DengueInfection(BasedDataset):
<|reserved_special_token_0|>
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
                    f'year == "{row["year"]}" & month =="{row["month"]}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<|reserved_special_token_0|>
def week_of_year(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
<|reserved_special_token_0|>
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def precip_mm(self):
self.fill_nan(col='precip_mm')
<|reserved_special_token_0|>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=
FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
self.city()
self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
                    f'year == "{row["year"]}" & month =="{row["month"]}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<|reserved_special_token_0|>
def week_of_year(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
<|reserved_special_token_0|>
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def precip_mm(self):
self.fill_nan(col='precip_mm')
<|reserved_special_token_0|>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=
FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
self.city()
self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
                    f'year == "{row["year"]}" & month =="{row["month"]}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
<|reserved_special_token_0|>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<|reserved_special_token_0|>
def week_of_year(self):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
<|reserved_special_token_0|>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
<|reserved_special_token_0|>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=
FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
self.city()
self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
                    f'year == "{row["year"]}" & month =="{row["month"]}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
<|reserved_special_token_0|>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<|reserved_special_token_0|>
def week_of_year(self):
pass
<|reserved_special_token_0|>
def six_month(self):
self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)
def persiann_precip_mm(self):
self.fill_nan(col='PERSIANN_precip_mm')
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
<|reserved_special_token_0|>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
def diur_temp_rng_c(self):
self.fill_nan(col='diur_temp_rng_c')
<|reserved_special_token_0|>
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
<|reserved_special_token_0|>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
<|reserved_special_token_1|>
# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.
import math
import numpy as np
import pandas as pd
from data.based.based_dataset import BasedDataset
from data.based.file_types import FileTypes
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
# self.six_month()
# self.week_split()
self.city()
            self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
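        # sin/cos encoding maps a cyclic feature onto the unit circle, so the
        # last week/month of a cycle ends up numerically close to the first
        # one instead of maximally far away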
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
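        # impute NaNs with the mean of the matching (year, month, city) cell
        # of a pivot table; if that cell is empty, fall back to the mean of
        # the whole year for this column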
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(f'year == "{row["year"]}" & month =="{row["month"]}"').reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row["year"]][col].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
def extract_quarter(self):
self.df['quarter'] = self.df['week_start_date'].dt.quarter
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),
'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),
'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
def year(self):
pass
def week_of_year(self):
pass
def week_start_date(self):
pass
def six_month(self):
self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)
def persiann_precip_mm(self):
self.fill_nan(col='PERSIANN_precip_mm')
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
def ncep_humidity_percent(self):
self.fill_nan(col='NCEP_humidity_percent')
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
def diur_temp_rng_c(self):
self.fill_nan(col='diur_temp_rng_c')
def max_temp_c(self):
self.fill_nan(col='max_temp_c')
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
def total_cases(self):
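        # drop weeks with unusually high case counts; 41 looks like an
        # empirically chosen outlier cut-off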
self.df = self.df[self.df['total_cases'] < 41]
def city(self):
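        # keep only the Iquitos ('iq') rows by dropping San Juan ('sj')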
self.df = self.df[self.df['city'] != 'sj']
|
flexible
|
{
"blob_id": "93ac8a1f795f7809a3e88b56ce90bf1d31706554",
"index": 1139,
"step-1": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n <mask token>\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n <mask token>\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n <mask token>\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n <mask token>\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n <mask token>\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-2": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n <mask token>\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n <mask token>\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n <mask token>\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask 
token>\n <mask token>\n <mask token>\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-3": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def 
ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-4": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n\n def six_month(self):\n self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)\n\n def persiann_precip_mm(self):\n self.fill_nan(col='PERSIANN_precip_mm')\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = 
self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n\n def diur_temp_rng_c(self):\n self.fill_nan(col='diur_temp_rng_c')\n <mask token>\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-5": "# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.\n\n\nimport math\n\nimport numpy as np\nimport pandas as pd\n\nfrom data.based.based_dataset import BasedDataset\nfrom data.based.file_types import FileTypes\n\n\nclass DengueInfection(BasedDataset):\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)\n\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n # self.six_month()\n # self.week_split()\n self.city()\n self.cyclic_encoder(col='weekofyear',max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n\n self.df[col + '_no_nans'] = self.df[col]\n\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(f'year == \"{row[\"year\"]}\" & month ==\"{row[\"month\"]}\"').reset_index()\n city = row['city']\n value = query[city]\n\n if value.empty:\n value = self.df.loc[self.df['year'] == row[\"year\"]][col].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n\n def extract_quarter(self):\n self.df['quarter'] = self.df['week_start_date'].dt.quarter\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),\n 'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),\n 'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n\n def year(self):\n pass\n\n def week_of_year(self):\n pass\n\n def week_start_date(self):\n pass\n\n def six_month(self):\n self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)\n\n def persiann_precip_mm(self):\n self.fill_nan(col='PERSIANN_precip_mm')\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(\n lambda 
k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n\n def ncep_humidity_percent(self):\n self.fill_nan(col='NCEP_humidity_percent')\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n\n def diur_temp_rng_c(self):\n self.fill_nan(col='diur_temp_rng_c')\n\n def max_temp_c(self):\n self.fill_nan(col='max_temp_c')\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n\n def total_cases(self):\n self.df = self.df[self.df['total_cases'] < 41]\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n\n",
"step-ids": [
16,
18,
22,
25,
33
]
}
|
[
16,
18,
22,
25,
33
] |
a = input().split(' ')
A = int(a[0])
B = int(a[1])
X = int(a[2])
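# X is reachable exactly when it lies in the closed interval [A, A + B]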
if A <= X and A + B >= X:
print('YES')
else:
print('NO')
|
normal
|
{
"blob_id": "9a60449aa13bc5e7e413d0e47a1972d93ccfe69f",
"index": 7194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif A <= X and A + B >= X:\n print('YES')\nelse:\n print('NO')\n",
"step-3": "a = input().split(' ')\nA = int(a[0])\nB = int(a[1])\nX = int(a[2])\nif A <= X and A + B >= X:\n print('YES')\nelse:\n print('NO')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding=utf-8
while True:
a,b=input().split()
a=float(a)
b=float(b)
if b==0:
print("error")
else:
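        # adding 0.5 before int() rounds a/b to the nearest integer
        # (this assumes the quotient is non-negative)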
c=a/b+0.5
c=int(c)
print(c)
|
normal
|
{
"blob_id": "dab5e7ee1d14cba485cbaece1354ec8d686ca4ab",
"index": 9080,
"step-1": "<mask token>\n",
"step-2": "while True:\n a, b = input().split()\n a = float(a)\n b = float(b)\n if b == 0:\n print('error')\n else:\n c = a / b + 0.5\n c = int(c)\n print(c)\n",
"step-3": "# coding=utf-8\nwhile True:\n a,b=input().split()\n a=float(a)\n b=float(b)\n if b==0:\n print(\"error\")\n else:\n c=a/b+0.5\n c=int(c)\n print(c)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import shutil
from tqdm import tqdm
from pathlib import Path
from eval_mead import PERCENT
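# Pipeline: dump each paper as a raw text file, let MEAD's text2cluster.pl
# build the cluster/docsent XML, then run mead.pl to produce a sentence
# extract covering PERCENT percent of each document.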
DATAPATH = '../../../data/test'
# MEAD_DIR = 'mead'
MEAD_DIR = os.path.abspath('mead')
MEAD_DATA_PATH = f'{MEAD_DIR}/data'
MEAD_BIN = f'{MEAD_DIR}/bin'
MEAD_LIB = f'{MEAD_DIR}/lib'
MEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'
MEAD_DID = f'{MEAD_DIR}/did'
TARGET = 'MEAD_TEST'
DATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)
parse = True
if os.path.exists(DATA_DIR):
    override = input('Data exists, override (delete and re-parse)? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(DATA_DIR)
else:
parse = False
os.makedirs(DATA_DIR, exist_ok=True)
cluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')
config_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')
CONFIG = f"""<?xml version='1.0' encoding='utf-8'?>
<MEAD-CONFIG LANG="ENG" TARGET="MEAD_TEST" CLUSTER-PATH="{DATA_DIR}" DOC-DIRECTORY="{DATA_DIR}/docsent">
<FEATURE-SET BASE-DIRECTORY="{DATA_DIR}/feature">
<FEATURE NAME="Position" SCRIPT="{MEAD_BIN}/feature-scripts/Position.pl" />
<FEATURE NAME="Length" SCRIPT="{MEAD_BIN}/feature-scripts/Length.pl" />
<FEATURE NAME="Centroid" SCRIPT="{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG" />
</FEATURE-SET>
<CLASSIFIER COMMAND-LINE="{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0" SYSTEM="MEADORIG" />
<COMPRESSION BASIS="sentences" PERCENT="1" />
</MEAD-CONFIG>
"""
if parse:
### Get raw text ###
with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:
raw_papers = stream.readlines()
papers = [paper.strip().split('##SENT##') for paper in raw_papers]
# Setting Env. Var.
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r') as stream:
        print('Make sure you have changed the following line to the absolute path of',
os.path.abspath(MEAD_DID))
print('line 18 of', os.path.join(
MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'))
print(stream.readlines()[17])
with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:
        print('Make sure you have changed the following line to the absolute path of',
os.path.abspath(MEAD_DIR))
print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))
print(stream.readlines()[30])
print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
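    # note: os.system runs in a subshell, so the export above does not
    # persist; setting os.environ is what actually propagates PERL5LIB
    # to the perl child processes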
os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)
# Write raw text, cluster file
# This stuff should be generated by text2cluster.pl
# cluster_lines = []
# cluster_lines.append("<?xml version = '1.0' encoding='utf-8'?>\n")
# cluster_lines.append("<CLUSTER LANG='ENG'>\n")
print('Converting src to raw text...')
for i, paper in tqdm(enumerate(papers), total=len(papers)):
# did = f'raw_text_{i+1}.txt'
did = f'{i+1}'
text_file = os.path.join(DATA_DIR, did)
with open(text_file, 'w') as stream:
            # make sure the sentence splits are the same as our annotation
stream.write('\n'.join(paper))
# delete </ pattern or XML might break
# os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/<\///g"')
# https://stackoverflow.com/questions/8914435/awk-sed-how-to-remove-parentheses-in-simple-text-file
# os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/[><]//g"')
# https://validator.w3.org/feed/docs/error/SAXError.html
# https://www.w3.org/TR/REC-xml/#dt-chardata
print('Clean up stuff that might influence XML parsing...')
os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</</g"')
os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/&/g"')
os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/>/g"')
# cluster_lines.append(f"\t<D DID='{did}' />\n")
# cluster_lines.append('</CLUSTER>\n')
# Get docsent
# with open(cluster_file, 'w') as stream:
# stream.writelines(cluster_lines)
# Path(cluster_file).touch()
print('Create cluster and docsent files...')
os.system(
f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')
if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:
print(
'MAKE SURE you have change $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl')
print("Currently, it has bug and can't create file")
# Run config
# with open(config_file, 'w') as stream:
# stream.write(CONFIG)
# extract_file = os.path.join(DATA_DIR, f'{TARGET}.extract')
# os.system(
# f'cat {config_file} | {MEAD_BIN}/driver.pl > {extract_file}')
# https://askubuntu.com/questions/20414/find-and-replace-text-within-a-file-using-commands
os.system(
f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"')
os.system(
f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"')
OUTPUT_PATH = '../output'
OUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')
if os.path.exists(OUTPUT_DIR):
    override = input('Results exist, do you want to re-run? (Y/n): ')
    if override.lower() == 'y':
        shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)

summary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')
extract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')
# compression basis is "sentences", and ask for a PERCENT% summary
shared_parameters = f'-sentences -percent {PERCENT}'

# os.system(
#     f'perl {MEAD_BIN}/mead.pl {shared_parameters} -summary -output {summary_file} {TARGET}')
os.system(
    f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}')
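

# Below is a minimal, hypothetical sketch of reading the selected sentences
# back out of the generated extract.  It assumes MEAD's .extract output is
# XML whose <S> elements carry DID (document id) and SNO (sentence number)
# attributes; verify the attribute names against your own output before
# relying on it.  The function is only defined here, not called.
def load_extracted_sentences(path=extract_file):
    import xml.etree.ElementTree as ET
    tree = ET.parse(path)
    # one (document id, sentence number) pair per extracted sentence
    return [(s.get('DID'), int(s.get('SNO'))) for s in tree.iter('S')]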
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2019 Linh Pham
# wwdtm_panelistvspanelist is released under the terms of the Apache License 2.0
"""WWDTM Panelist Appearance Report Generator"""
import argparse
from collections import OrderedDict
from datetime import datetime
import json
import os
import shutil
from typing import List, Dict, Text
import mysql.connector
import pytz
from jinja2 import Environment, FileSystemLoader


def retrieve_panelist_appearance_counts(panelist_id: int,
                                        database_connection: mysql.connector.connect
                                       ) -> List[Dict]:
    """Retrieve yearly appearance count for the requested panelist ID"""
    cursor = database_connection.cursor()
    query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "WHERE pm.panelistid = %s AND s.bestof = 0 "
             "AND s.repeatshowid IS NULL "
             "GROUP BY p.panelist, YEAR(s.showdate) "
             "ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
    cursor.execute(query, (panelist_id, ))
    result = cursor.fetchall()
    cursor.close()
    if not result:
        return None

    appearances = OrderedDict()
    total_appearances = 0
    for row in result:
        appearances[row[0]] = row[1]
        total_appearances += row[1]

    appearances["total"] = total_appearances
    return appearances
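
# For illustration, the mapping returned above has one entry per active year
# plus a running total, e.g. (counts invented):
#     OrderedDict([(1998, 2), (1999, 11), ('total', 13)])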


def retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect
                                           ) -> List[Dict]:
    """Retrieve all appearance counts for all panelists from the
    database"""
    cursor = database_connection.cursor()
    query = ("SELECT DISTINCT p.panelistid, p.panelist "
             "FROM ww_showpnlmap pm "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "WHERE s.bestof = 0 AND s.repeatshowid IS NULL "
             "ORDER BY p.panelist ASC")
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()
    if not result:
        return None

    panelists = []
    for row in result:
        panelist = {}
        panelist_id = row[0]
        panelist["name"] = row[1]
        appearances = retrieve_panelist_appearance_counts(panelist_id=panelist_id,
                                                          database_connection=database_connection)
        panelist["appearances"] = appearances
        panelists.append(panelist)

    return panelists


def retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]:
    """Retrieve a list of all available show years"""
    cursor = database_connection.cursor()
    query = ("SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s "
             "ORDER BY YEAR(s.showdate) ASC")
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()
    if not result:
        return None

    years = []
    for row in result:
        years.append(row[0])

    return years


def load_config():
    """Load configuration values from configuration file and from
    options passed into script execution"""
    # Read in configuration file for default values
    with open("config.json", "r") as config_file:
        config_dict = json.load(config_file)

    # Read in options passed in that override values from the config.json file
    parser = argparse.ArgumentParser()
    parser.add_argument("--ga-property-code",
                        dest="ga_property_code",
                        type=str,
                        help="Google Analytics Property Code (default: %(default)s)",
                        default=config_dict["report"]["ga_property_code"])
    parser.add_argument("--css-directory",
                        dest="css_directory",
                        type=str,
                        help="Directory where the base CSS stylesheet file is stored "
                             "(default: %(default)s)",
                        default=config_dict["report"]["css_directory"])
    parser.add_argument("--css-filename",
                        dest="css_filename",
                        type=str,
                        help="File name of the report CSS stylesheet file "
                             "(default: %(default)s)",
                        default=config_dict["report"]["css_filename"])
    parser.add_argument("--output-directory",
                        dest="output_directory",
                        type=str,
                        help="Directory where the generated report will be saved "
                             "(default: %(default)s)",
                        default=config_dict["report"]["output_directory"])
    parser.add_argument("--output-filename",
                        dest="output_filename",
                        type=str,
                        help="File name that the generated report will be saved as "
                             "(default: %(default)s)",
                        default=config_dict["report"]["output_filename"])
    args = parser.parse_args()

    # Override the values from the config.json file if values were set via argparse
    if args.ga_property_code != config_dict["report"]["ga_property_code"]:
        config_dict["report"]["ga_property_code"] = args.ga_property_code

    if args.css_directory != config_dict["report"]["css_directory"]:
        config_dict["report"]["css_directory"] = args.css_directory

    if args.css_filename != config_dict["report"]["css_filename"]:
        config_dict["report"]["css_filename"] = args.css_filename

    if args.output_directory != config_dict["report"]["output_directory"]:
        config_dict["report"]["output_directory"] = args.output_directory

    if args.output_filename != config_dict["report"]["output_filename"]:
        config_dict["report"]["output_filename"] = args.output_filename

    return config_dict
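
# A hypothetical config.json matching the keys read above.  The "database"
# section is passed straight to mysql.connector.connect() in main(), so any
# of that connector's keyword arguments are valid there; all values below
# are invented placeholders.
#
#     {
#         "database": {"host": "localhost", "user": "wwdtm",
#                      "password": "...", "database": "wwdtm"},
#         "report": {"ga_property_code": "UA-XXXXXXXX-X",
#                    "css_directory": "css",
#                    "css_filename": "style.css",
#                    "output_directory": "output",
#                    "output_filename": "panelist-appearances.html"}
#     }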


def render_report(show_years: List[int],
                  panelists: List[Dict],
                  report_settings: Dict
                 ) -> Text:
    """Render appearances report using Jinja2"""
    # Setup Jinja2 Template
    template_loader = FileSystemLoader("./template")
    template_env = Environment(loader=template_loader,
                               trim_blocks=True,
                               lstrip_blocks=True)
    template_file = "report.tmpl.html"
    template = template_env.get_template(template_file)

    # Generate timestamp to include in page footer
    time_zone = pytz.timezone("America/Los_Angeles")
    rendered_date_time = datetime.now(time_zone)

    # Build dictionary to pass into template renderer
    render_data = {}
    render_data["show_years"] = show_years
    render_data["panelists"] = panelists
    render_data["settings"] = report_settings
    render_data["rendered_at"] = rendered_date_time.strftime("%A, %B %d, %Y %H:%M:%S %Z")

    # Render the report; writing it out is handled by generate_output_files()
    report = template.render(render_data=render_data)
    return report
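
# Everything is exposed to the template under a single "render_data"
# variable, so report.tmpl.html (not included here) presumably builds its
# table along these lines -- a hypothetical Jinja2 fragment:
#
#     {% for panelist in render_data.panelists %}
#       {% for year in render_data.show_years %}
#         {{ panelist.appearances[year]|default('-') }}
#       {% endfor %}
#     {% endfor %}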


def generate_output_files(rendered_report: Text,
                          report_settings: Dict) -> None:
    """Write the generated report to a file in the output directory
    and copy the base CSS file to the same directory"""
    css_path = os.path.join(report_settings["css_directory"],
                            report_settings["css_filename"])
    output_path = os.path.join(report_settings["output_directory"],
                               report_settings["output_filename"])

    # Create the output directory if it does not exist
    if not os.path.isdir(report_settings["output_directory"]):
        os.mkdir(report_settings["output_directory"])

    # Write out the generated report
    with open(output_path, "w") as output_file:
        if output_file.writable():
            output_file.write(rendered_report)
        else:
            print("Error: {} is not writable".format(output_path))

    # Copy CSS file into output directory
    shutil.copy2(css_path, report_settings["output_directory"])
    return


def main():
    """Bootstrap database connection, retrieve panelist appearance data,
    generate the report and create an output bundle"""
    app_config = load_config()
    database_connection = mysql.connector.connect(**app_config["database"])
    panelists = retrieve_all_panelist_appearance_counts(database_connection)
    show_years = retrieve_all_years(database_connection)
    rendered_report = render_report(show_years=show_years,
                                    panelists=panelists,
                                    report_settings=app_config["report"])
    generate_output_files(rendered_report=rendered_report,
                          report_settings=app_config["report"])


# Only run if executed as a script and not imported
if __name__ == '__main__':
    main()
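
# Example invocation overriding two of the config.json defaults (substitute
# the actual script file name, which is not fixed here):
#
#     python3 <script>.py --output-directory /var/www/reports \
#         --output-filename appearances.html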
"step-4": "<mask token>\n\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect) ->List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count FROM ww_showpnlmap pm JOIN ww_shows s ON s.showid = pm.showid JOIN ww_panelists p ON p.panelistid = pm.panelistid WHERE pm.panelistid = %s AND s.bestof = 0 AND s.repeatshowid IS NULL GROUP BY p.panelist, YEAR(s.showdate) ORDER BY p.panelist ASC, YEAR(s.showdate) ASC'\n )\n cursor.execute(query, (panelist_id,))\n result = cursor.fetchall()\n if not result:\n return None\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n appearances['total'] = total_appearances\n return appearances\n\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.\n connector.connect) ->List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT p.panelistid, p.panelist FROM ww_showpnlmap pm JOIN ww_panelists p ON p.panelistid = pm.panelistid JOIN ww_shows s ON s.showid = pm.showid WHERE s.bestof = 0 AND s.repeatshowid IS NULL ORDER BY p.panelist ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n return None\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist['name'] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=\n panelist_id, database_connection=database_connection)\n panelist['appearances'] = appearances\n panelists.append(panelist)\n return panelists\n\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) ->List[int\n ]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\n 'SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s ORDER BY YEAR(s.showdate) ASC'\n )\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n if not result:\n return None\n years = []\n for row in result:\n years.append(row[0])\n return years\n\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n with open('config.json', 'r') as config_file:\n config_dict = json.load(config_file)\n parser = argparse.ArgumentParser()\n parser.add_argument('--ga-property-code', dest='ga_property_code', type\n =str, help='Google Analytics Property Code (default: %(default)s)',\n default=config_dict['report']['ga_property_code'])\n parser.add_argument('--css-directory', dest='css_directory', type=str,\n help=\n 'Directory where the base CSS stylesheet file is stored (default: %(default)s)'\n , default=config_dict['report']['css_directory'])\n parser.add_argument('--css-filename', dest='css_filename', type=str,\n help=\n 'File name of the report CSS stylesheet file (default: %(default)s)',\n default=config_dict['report']['css_filename'])\n parser.add_argument('--output-directory', dest='output_directory', type\n =str, help=\n 'Directory where the generated report will be saved (default: %(default)s)'\n , default=config_dict['report']['output_directory'])\n parser.add_argument('--output-filename', dest='output_filename', type=\n str, help=\n 'File name of the generated report will be saved (default: %(default)s)'\n , 
default=config_dict['report']['output_filename'])\n args = parser.parse_args()\n if args.ga_property_code != config_dict['report']['ga_property_code']:\n config_dict['report']['ga_property_code'] = args.ga_property_code\n if args.css_directory != config_dict['report']['css_directory']:\n config_dict['report']['css_directory'] = args.css_directory\n if args.css_filename != config_dict['report']['css_filename']:\n config_dict['report']['css_filename'] = args.css_filename\n if args.output_directory != config_dict['report']['output_directory']:\n config_dict['report']['output_directory'] = args.output_directory\n if args.output_filename != config_dict['report']['output_filename']:\n config_dict['report']['output_filename'] = args.output_filename\n return config_dict\n\n\ndef render_report(show_years: List[int], panelists: List[Dict],\n report_settings: Dict) ->Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n template_loader = FileSystemLoader('./template')\n template_env = Environment(loader=template_loader, trim_blocks=True,\n lstrip_blocks=True)\n template_file = 'report.tmpl.html'\n template = template_env.get_template(template_file)\n time_zone = pytz.timezone('America/Los_Angeles')\n rendered_date_time = datetime.now(time_zone)\n render_data = {}\n render_data['show_years'] = show_years\n render_data['panelists'] = panelists\n render_data['settings'] = report_settings\n render_data['rendered_at'] = rendered_date_time.strftime(\n '%A, %B %d, %Y %H:%M:%S %Z')\n report = template.render(render_data=render_data)\n return report\n\n\ndef generate_output_files(rendered_report: Text, report_settings: Dict) ->None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n css_path = os.path.join(report_settings['css_directory'],\n report_settings['css_filename'])\n output_path = os.path.join(report_settings['output_directory'],\n report_settings['output_filename'])\n if not os.path.isdir(report_settings['output_directory']):\n os.mkdir(report_settings['output_directory'])\n with open(output_path, 'w') as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print('Error: {} is not writable'.format(output_path))\n shutil.copy2(css_path, report_settings['output_directory'])\n return\n\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config['database'])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n rendered_report = render_report(show_years=show_years, panelists=\n panelists, report_settings=app_config['report'])\n generate_output_files(rendered_report=rendered_report, report_settings=\n app_config['report'])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018-2019 Linh Pham\n# wwdtm_panelistvspanelist is relased under the terms of the Apache License 2.0\n\"\"\"WWDTM Panelist Appearance Report Generator\"\"\"\n\nimport argparse\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport json\nimport os\nimport shutil\nfrom typing import List, Dict, Text\nimport mysql.connector\nimport pytz\n\nfrom jinja2 import Environment, FileSystemLoader\n\ndef retrieve_panelist_appearance_counts(panelist_id: int,\n database_connection: mysql.connector.connect\n ) -> List[Dict]:\n \"\"\"Retrieve yearly apperance count for the requested panelist ID\"\"\"\n\n cursor = database_connection.cursor()\n query = (\"SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"WHERE pm.panelistid = %s AND s.bestof = 0 \"\n \"AND s.repeatshowid IS NULL \"\n \"GROUP BY p.panelist, YEAR(s.showdate) \"\n \"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC\")\n cursor.execute(query, (panelist_id, ))\n result = cursor.fetchall()\n\n if not result:\n return None\n\n appearances = OrderedDict()\n total_appearances = 0\n for row in result:\n appearances[row[0]] = row[1]\n total_appearances += row[1]\n\n appearances[\"total\"] = total_appearances\n return appearances\n\ndef retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect\n ) -> List[Dict]:\n \"\"\"Retrieve all appearance counts for all panelists from the\n database\"\"\"\n\n cursor = database_connection.cursor()\n query = (\"SELECT DISTINCT p.panelistid, p.panelist \"\n \"FROM ww_showpnlmap pm \"\n \"JOIN ww_panelists p ON p.panelistid = pm.panelistid \"\n \"JOIN ww_shows s ON s.showid = pm.showid \"\n \"WHERE s.bestof = 0 AND s.repeatshowid IS NULL \"\n \"ORDER BY p.panelist ASC\")\n cursor.execute(query)\n result = cursor.fetchall()\n\n if not result:\n return None\n\n panelists = []\n for row in result:\n panelist = {}\n panelist_id = row[0]\n panelist[\"name\"] = row[1]\n appearances = retrieve_panelist_appearance_counts(panelist_id=panelist_id,\n database_connection=database_connection)\n panelist[\"appearances\"] = appearances\n panelists.append(panelist)\n\n return panelists\n\ndef retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]:\n \"\"\"Retrieve a list of all available show years\"\"\"\n cursor = database_connection.cursor()\n query = (\"SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s \"\n \"ORDER BY YEAR(s.showdate) ASC\")\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n\n if not result:\n return None\n\n years = []\n for row in result:\n years.append(row[0])\n\n return years\n\ndef load_config():\n \"\"\"Load configuration values from configuration file and from\n options passed into script execution\"\"\"\n\n # Read in configuration file for default values\n with open(\"config.json\", \"r\") as config_file:\n config_dict = json.load(config_file)\n\n # Read in options passed in that override values from the config.json file\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ga-property-code\",\n dest=\"ga_property_code\",\n type=str,\n help=\"Google Analytics Property Code (default: %(default)s)\",\n default=config_dict[\"report\"][\"ga_property_code\"])\n parser.add_argument(\"--css-directory\",\n dest=\"css_directory\",\n type=str,\n help=\"Directory where the base CSS stylesheet file is stored \"\n 
\"(default: %(default)s)\",\n default=config_dict[\"report\"][\"css_directory\"])\n parser.add_argument(\"--css-filename\",\n dest=\"css_filename\",\n type=str,\n help=\"File name of the report CSS stylesheet file \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"css_filename\"])\n parser.add_argument(\"--output-directory\",\n dest=\"output_directory\",\n type=str,\n help=\"Directory where the generated report will be saved \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"output_directory\"])\n parser.add_argument(\"--output-filename\",\n dest=\"output_filename\",\n type=str,\n help=\"File name of the generated report will be saved \"\n \"(default: %(default)s)\",\n default=config_dict[\"report\"][\"output_filename\"])\n args = parser.parse_args()\n\n # Override the values from the config.json file if values were set via argparse\n if args.ga_property_code != config_dict[\"report\"][\"ga_property_code\"]:\n config_dict[\"report\"][\"ga_property_code\"] = args.ga_property_code\n\n if args.css_directory != config_dict[\"report\"][\"css_directory\"]:\n config_dict[\"report\"][\"css_directory\"] = args.css_directory\n\n if args.css_filename != config_dict[\"report\"][\"css_filename\"]:\n config_dict[\"report\"][\"css_filename\"] = args.css_filename\n\n if args.output_directory != config_dict[\"report\"][\"output_directory\"]:\n config_dict[\"report\"][\"output_directory\"] = args.output_directory\n\n if args.output_filename != config_dict[\"report\"][\"output_filename\"]:\n config_dict[\"report\"][\"output_filename\"] = args.output_filename\n\n return config_dict\n\ndef render_report(show_years: List[int],\n panelists: List[Dict],\n report_settings: Dict\n ) -> Text:\n \"\"\"Render appearances report using Jinja2\"\"\"\n\n # Setup Jinja2 Template\n template_loader = FileSystemLoader(\"./template\")\n template_env = Environment(loader=template_loader,\n trim_blocks=True,\n lstrip_blocks=True)\n template_file = \"report.tmpl.html\"\n template = template_env.get_template(template_file)\n\n # Generate timestamp to include in page footer\n time_zone = pytz.timezone(\"America/Los_Angeles\")\n rendered_date_time = datetime.now(time_zone)\n\n # Build dictionary to pass into template renderer\n render_data = {}\n render_data[\"show_years\"] = show_years\n render_data[\"panelists\"] = panelists\n render_data[\"settings\"] = report_settings\n render_data[\"rendered_at\"] = rendered_date_time.strftime(\"%A, %B %d, %Y %H:%M:%S %Z\")\n\n # Render the report and write out to output directory\n report = template.render(render_data=render_data)\n return report\n\ndef generate_output_files(rendered_report: Text,\n report_settings: Dict) -> None:\n \"\"\"Writes out the generated report file to file in the output directory\n and copies the base CSS file to the same directory\"\"\"\n\n css_path = os.path.join(report_settings[\"css_directory\"],\n report_settings[\"css_filename\"])\n output_path = os.path.join(report_settings[\"output_directory\"],\n report_settings[\"output_filename\"])\n\n # Create the output directory if it does not exist\n if not os.path.isdir(report_settings[\"output_directory\"]):\n os.mkdir(report_settings[\"output_directory\"])\n\n # Write out the generated report\n with open(output_path, \"w\") as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print(\"Error: {} is not writable\".format(output_path))\n\n # Copy CSS file into output directory\n shutil.copy2(css_path, report_settings[\"output_directory\"])\n\n 
return\n\ndef main():\n \"\"\"Bootstrap database connection, retrieve panelist appearance data,\n generate the report and create an output bundle\"\"\"\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])\n\n# Only run if executed as a script and not imported\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
class SimpleControlComponentGuide(SimpleControlComponent):
<|reserved_special_token_0|>
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Guide Component:' + name)
super(SimpleControlComponentGuide, self).__init__(name, parent)
guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)
self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,
minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)
self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)
self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')
self.mainCtrl.rotatePoints(90, 0, 0)
data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.
getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}
self.loadData(data)
Profiler.getInstance().pop()
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(SimpleControlComponentGuide, self).saveData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentGuide, self).loadData(data)
self.ctrlSizeInputAttr.setValue(data['ctrlSize'])
self.mainCtrl.xfo = data['ctrlXfo']
scaleValue = data['ctrlSize']
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))
return True
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig.
Return:
The JSON rig data object.
"""
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def resizeMainCtrl(self, newSize):
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
            True if this component is a guide component.
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return SimpleControlComponentRig
class SimpleControlComponentRig(SimpleControlComponent):
"""Simple Control Component Rig"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Rig Component:' + name)
super(SimpleControlComponentRig, self).__init__(name, parent)
self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)
self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()
self.mainCtrl.lockScale(x=True, y=True, z=True)
mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',
parent=self.mainCtrl)
self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=
mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)
self.rigScaleOutputAttr.connect(self.rigScaleAttr)
deformersLayer = self.getOrCreateLayer('deformers')
self.defCmpGrp = ComponentGroup(self.getName(), self, parent=
deformersLayer)
self.addItem('defCmpGrp', self.defCmpGrp)
self.mainDef = Joint('main', parent=self.defCmpGrp)
self.mainDef.setComponent(self)
self.mainInputConstraint = PoseConstraint('_'.join([self.
mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))
self.mainInputConstraint.setMaintainOffset(True)
self.mainInputConstraint.addConstrainer(self.mainInputTgt)
self.mainCtrlSpace.addConstraint(self.mainInputConstraint)
self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt
.getName(), 'To', self.mainCtrl.getName()]))
self.mainOutputConstraint.addConstrainer(self.mainCtrl)
self.outputTgt.addConstraint(self.mainOutputConstraint)
self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.
getName(), 'To', self.mainCtrl.getName()]))
self.mainDefConstraint.addConstrainer(self.mainCtrl)
self.mainDef.addConstraint(self.mainDefConstraint)
Profiler.getInstance().pop()
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentRig, self).loadData(data)
ctrlSize = data.get('ctrlSize', 1.0)
ctrlXfo = data.get('ctrlXfo', Xfo())
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))
self.mainCtrlSpace.xfo = ctrlXfo
self.mainCtrl.xfo = ctrlXfo
self.mainInputTgt.xfo = ctrlXfo
self.mainDef.xfo = ctrlXfo
self.outputTgt.xfo = ctrlXfo
self.mainInputConstraint.evaluate()
self.mainOutputConstraint.evaluate()
self.mainDefConstraint.evaluate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SimpleControlComponent(BaseExampleComponent):
<|reserved_special_token_0|>
def __init__(self, name='SimpleControl', parent=None):
super(SimpleControlComponent, self).__init__(name, parent)
self.mainInputTgt = self.createInput('mainInput', dataType='Xfo',
parent=self.inputHrcGrp).getTarget()
self.outputTgt = self.createOutput('output', dataType='Xfo', parent
=self.outputHrcGrp).getTarget()
self.drawDebugInputAttr = self.createInput('drawDebug', dataType=
'Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()
self.rigScaleOutputAttr = self.createOutput('rigScale', dataType=
'Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()
class SimpleControlComponentGuide(SimpleControlComponent):
"""Simple Control Component Guide"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Guide Component:' + name)
super(SimpleControlComponentGuide, self).__init__(name, parent)
guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)
self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,
minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)
self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)
self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')
self.mainCtrl.rotatePoints(90, 0, 0)
data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.
getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}
self.loadData(data)
Profiler.getInstance().pop()
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(SimpleControlComponentGuide, self).saveData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentGuide, self).loadData(data)
self.ctrlSizeInputAttr.setValue(data['ctrlSize'])
self.mainCtrl.xfo = data['ctrlXfo']
scaleValue = data['ctrlSize']
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))
return True
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig.
Return:
The JSON rig data object.
"""
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def resizeMainCtrl(self, newSize):
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
            True if this component is a guide component.
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return SimpleControlComponentRig
class SimpleControlComponentRig(SimpleControlComponent):
"""Simple Control Component Rig"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Rig Component:' + name)
super(SimpleControlComponentRig, self).__init__(name, parent)
self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)
self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()
self.mainCtrl.lockScale(x=True, y=True, z=True)
mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',
parent=self.mainCtrl)
self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=
mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)
self.rigScaleOutputAttr.connect(self.rigScaleAttr)
deformersLayer = self.getOrCreateLayer('deformers')
self.defCmpGrp = ComponentGroup(self.getName(), self, parent=
deformersLayer)
self.addItem('defCmpGrp', self.defCmpGrp)
self.mainDef = Joint('main', parent=self.defCmpGrp)
self.mainDef.setComponent(self)
self.mainInputConstraint = PoseConstraint('_'.join([self.
mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))
self.mainInputConstraint.setMaintainOffset(True)
self.mainInputConstraint.addConstrainer(self.mainInputTgt)
self.mainCtrlSpace.addConstraint(self.mainInputConstraint)
self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt
.getName(), 'To', self.mainCtrl.getName()]))
self.mainOutputConstraint.addConstrainer(self.mainCtrl)
self.outputTgt.addConstraint(self.mainOutputConstraint)
self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.
getName(), 'To', self.mainCtrl.getName()]))
self.mainDefConstraint.addConstrainer(self.mainCtrl)
self.mainDef.addConstraint(self.mainDefConstraint)
Profiler.getInstance().pop()
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentRig, self).loadData(data)
ctrlSize = data.get('ctrlSize', 1.0)
ctrlXfo = data.get('ctrlXfo', Xfo())
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))
self.mainCtrlSpace.xfo = ctrlXfo
self.mainCtrl.xfo = ctrlXfo
self.mainInputTgt.xfo = ctrlXfo
self.mainDef.xfo = ctrlXfo
self.outputTgt.xfo = ctrlXfo
self.mainInputConstraint.evaluate()
self.mainOutputConstraint.evaluate()
self.mainDefConstraint.evaluate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SimpleControlComponent(BaseExampleComponent):
"""Simple Control Component Base"""
def __init__(self, name='SimpleControl', parent=None):
super(SimpleControlComponent, self).__init__(name, parent)
self.mainInputTgt = self.createInput('mainInput', dataType='Xfo',
parent=self.inputHrcGrp).getTarget()
self.outputTgt = self.createOutput('output', dataType='Xfo', parent
=self.outputHrcGrp).getTarget()
self.drawDebugInputAttr = self.createInput('drawDebug', dataType=
'Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()
self.rigScaleOutputAttr = self.createOutput('rigScale', dataType=
'Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()
class SimpleControlComponentGuide(SimpleControlComponent):
"""Simple Control Component Guide"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Guide Component:' + name)
super(SimpleControlComponentGuide, self).__init__(name, parent)
guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)
self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,
minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)
self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)
self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')
self.mainCtrl.rotatePoints(90, 0, 0)
data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.
getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}
self.loadData(data)
Profiler.getInstance().pop()
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(SimpleControlComponentGuide, self).saveData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentGuide, self).loadData(data)
self.ctrlSizeInputAttr.setValue(data['ctrlSize'])
self.mainCtrl.xfo = data['ctrlXfo']
scaleValue = data['ctrlSize']
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))
return True
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig.
Return:
The JSON rig data object.
"""
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def resizeMainCtrl(self, newSize):
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
            True if this component is a guide component.
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return SimpleControlComponentRig
class SimpleControlComponentRig(SimpleControlComponent):
"""Simple Control Component Rig"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Rig Component:' + name)
super(SimpleControlComponentRig, self).__init__(name, parent)
self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)
self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()
self.mainCtrl.lockScale(x=True, y=True, z=True)
mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',
parent=self.mainCtrl)
self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=
mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)
self.rigScaleOutputAttr.connect(self.rigScaleAttr)
deformersLayer = self.getOrCreateLayer('deformers')
self.defCmpGrp = ComponentGroup(self.getName(), self, parent=
deformersLayer)
self.addItem('defCmpGrp', self.defCmpGrp)
self.mainDef = Joint('main', parent=self.defCmpGrp)
self.mainDef.setComponent(self)
self.mainInputConstraint = PoseConstraint('_'.join([self.
mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))
self.mainInputConstraint.setMaintainOffset(True)
self.mainInputConstraint.addConstrainer(self.mainInputTgt)
self.mainCtrlSpace.addConstraint(self.mainInputConstraint)
self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt
.getName(), 'To', self.mainCtrl.getName()]))
self.mainOutputConstraint.addConstrainer(self.mainCtrl)
self.outputTgt.addConstraint(self.mainOutputConstraint)
self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.
getName(), 'To', self.mainCtrl.getName()]))
self.mainDefConstraint.addConstrainer(self.mainCtrl)
self.mainDef.addConstraint(self.mainDefConstraint)
Profiler.getInstance().pop()
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentRig, self).loadData(data)
ctrlSize = data.get('ctrlSize', 1.0)
ctrlXfo = data.get('ctrlXfo', Xfo())
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))
self.mainCtrlSpace.xfo = ctrlXfo
self.mainCtrl.xfo = ctrlXfo
self.mainInputTgt.xfo = ctrlXfo
self.mainDef.xfo = ctrlXfo
self.outputTgt.xfo = ctrlXfo
self.mainInputConstraint.evaluate()
self.mainOutputConstraint.evaluate()
self.mainDefConstraint.evaluate()
<|reserved_special_token_0|>
ks.registerComponent(SimpleControlComponentGuide)
ks.registerComponent(SimpleControlComponentRig)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SimpleControlComponent(BaseExampleComponent):
"""Simple Control Component Base"""
def __init__(self, name='SimpleControl', parent=None):
super(SimpleControlComponent, self).__init__(name, parent)
self.mainInputTgt = self.createInput('mainInput', dataType='Xfo',
parent=self.inputHrcGrp).getTarget()
self.outputTgt = self.createOutput('output', dataType='Xfo', parent
=self.outputHrcGrp).getTarget()
self.drawDebugInputAttr = self.createInput('drawDebug', dataType=
'Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()
self.rigScaleOutputAttr = self.createOutput('rigScale', dataType=
'Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()
class SimpleControlComponentGuide(SimpleControlComponent):
"""Simple Control Component Guide"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Guide Component:' + name)
super(SimpleControlComponentGuide, self).__init__(name, parent)
guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)
self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,
minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)
self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)
self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')
self.mainCtrl.rotatePoints(90, 0, 0)
data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.
getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}
self.loadData(data)
Profiler.getInstance().pop()
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(SimpleControlComponentGuide, self).saveData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentGuide, self).loadData(data)
self.ctrlSizeInputAttr.setValue(data['ctrlSize'])
self.mainCtrl.xfo = data['ctrlXfo']
scaleValue = data['ctrlSize']
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))
return True
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig.
Return:
The JSON rig data object.
"""
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()
data['ctrlXfo'] = self.mainCtrl.xfo
return data
def resizeMainCtrl(self, newSize):
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
            True if this component is a guide component.
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return SimpleControlComponentRig
class SimpleControlComponentRig(SimpleControlComponent):
"""Simple Control Component Rig"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push(
'Construct Simple Control Rig Component:' + name)
super(SimpleControlComponentRig, self).__init__(name, parent)
self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)
self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()
self.mainCtrl.lockScale(x=True, y=True, z=True)
mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',
parent=self.mainCtrl)
self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=
mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)
self.rigScaleOutputAttr.connect(self.rigScaleAttr)
deformersLayer = self.getOrCreateLayer('deformers')
self.defCmpGrp = ComponentGroup(self.getName(), self, parent=
deformersLayer)
self.addItem('defCmpGrp', self.defCmpGrp)
self.mainDef = Joint('main', parent=self.defCmpGrp)
self.mainDef.setComponent(self)
self.mainInputConstraint = PoseConstraint('_'.join([self.
mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))
self.mainInputConstraint.setMaintainOffset(True)
self.mainInputConstraint.addConstrainer(self.mainInputTgt)
self.mainCtrlSpace.addConstraint(self.mainInputConstraint)
self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt
.getName(), 'To', self.mainCtrl.getName()]))
self.mainOutputConstraint.addConstrainer(self.mainCtrl)
self.outputTgt.addConstraint(self.mainOutputConstraint)
self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.
getName(), 'To', self.mainCtrl.getName()]))
self.mainDefConstraint.addConstrainer(self.mainCtrl)
self.mainDef.addConstraint(self.mainDefConstraint)
Profiler.getInstance().pop()
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(SimpleControlComponentRig, self).loadData(data)
ctrlSize = data.get('ctrlSize', 1.0)
ctrlXfo = data.get('ctrlXfo', Xfo())
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))
self.mainCtrlSpace.xfo = ctrlXfo
self.mainCtrl.xfo = ctrlXfo
self.mainInputTgt.xfo = ctrlXfo
self.mainDef.xfo = ctrlXfo
self.outputTgt.xfo = ctrlXfo
self.mainInputConstraint.evaluate()
self.mainOutputConstraint.evaluate()
self.mainDefConstraint.evaluate()
<|reserved_special_token_0|>
ks = KrakenSystem.getInstance()
ks.registerComponent(SimpleControlComponentGuide)
ks.registerComponent(SimpleControlComponentRig)
<|reserved_special_token_1|>
from kraken.core.maths import Vec3, Euler, Quat, Xfo
from kraken.core.objects.components.base_example_component import BaseExampleComponent
from kraken.core.objects.attributes.attribute_group import AttributeGroup
from kraken.core.objects.attributes.scalar_attribute import ScalarAttribute
from kraken.core.objects.attributes.bool_attribute import BoolAttribute
from kraken.core.objects.constraints.pose_constraint import PoseConstraint
from kraken.core.objects.component_group import ComponentGroup
from kraken.core.objects.hierarchy_group import HierarchyGroup
from kraken.core.objects.locator import Locator
from kraken.core.objects.joint import Joint
from kraken.core.objects.ctrlSpace import CtrlSpace
from kraken.core.objects.control import Control
from kraken.core.objects.operators.kl_operator import KLOperator
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
class SimpleControlComponent(BaseExampleComponent):
"""Simple Control Component Base"""
def __init__(self, name='SimpleControl', parent=None):
super(SimpleControlComponent, self).__init__(name, parent)
# ===========
# Declare IO
# ===========
# Declare Inputs Xfos
self.mainInputTgt = self.createInput('mainInput', dataType='Xfo', parent=self.inputHrcGrp).getTarget()
# Declare Output Xfos
self.outputTgt = self.createOutput('output', dataType='Xfo', parent=self.outputHrcGrp).getTarget()
# Declare Input Attrs
self.drawDebugInputAttr = self.createInput('drawDebug', dataType='Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()
# Declare Output Attrs
self.rigScaleOutputAttr = self.createOutput('rigScale', dataType='Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()
class SimpleControlComponentGuide(SimpleControlComponent):
"""Simple Control Component Guide"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push("Construct Simple Control Guide Component:" + name)
super(SimpleControlComponentGuide, self).__init__(name, parent)
# =========
# Attributes
# =========
        # Add Component Params to the Guide component
guideSettingsAttrGrp = AttributeGroup("GuideSettings", parent=self)
self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0, minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)
self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)
# =========
# Controls
# =========
# Guide Controls
self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')
self.mainCtrl.rotatePoints(90, 0, 0)
data = {
"location": 'M',
"ctrlSize": self.ctrlSizeInputAttr.getValue(),
"ctrlXfo": Xfo(tr=Vec3(0.0, 0.0, 0.0))
}
self.loadData(data)
Profiler.getInstance().pop()
# =============
# Data Methods
# =============
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(SimpleControlComponentGuide, self).saveData()
data["ctrlSize"] = self.ctrlSizeInputAttr.getValue()
data["ctrlXfo"] = self.mainCtrl.xfo
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
        super(SimpleControlComponentGuide, self).loadData(data)
self.ctrlSizeInputAttr.setValue(data["ctrlSize"])
self.mainCtrl.xfo = data["ctrlXfo"]
scaleValue = data["ctrlSize"]
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))
return True
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig.
Return:
The JSON rig data object.
"""
data = super(SimpleControlComponentGuide, self).getRigBuildData()
data["ctrlSize"] = self.ctrlSizeInputAttr.getValue()
data["ctrlXfo"] = self.mainCtrl.xfo
return data
# ==========
# Callbacks
# ==========
def resizeMainCtrl(self, newSize):
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))
# ==============
# Class Methods
# ==============
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
            True if this component is a guide component.
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return SimpleControlComponentRig
class SimpleControlComponentRig(SimpleControlComponent):
"""Simple Control Component Rig"""
def __init__(self, name='SimpleControl', parent=None):
Profiler.getInstance().push("Construct Simple Control Rig Component:" + name)
super(SimpleControlComponentRig, self).__init__(name, parent)
# =========
# Controls
# =========
# Add Controls
self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)
self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()
self.mainCtrl.lockScale(x=True, y=True, z=True)
# Add Component Params to Main control
mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings', parent=self.mainCtrl)
self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)
self.rigScaleOutputAttr.connect(self.rigScaleAttr)
# ==========
# Deformers
# ==========
deformersLayer = self.getOrCreateLayer('deformers')
self.defCmpGrp = ComponentGroup(self.getName(), self, parent=deformersLayer)
self.addItem('defCmpGrp', self.defCmpGrp)
self.mainDef = Joint('main', parent=self.defCmpGrp)
self.mainDef.setComponent(self)
# ==============
# Constrain I/O
# ==============
# Constrain inputs
self.mainInputConstraint = PoseConstraint('_'.join([self.mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))
self.mainInputConstraint.setMaintainOffset(True)
self.mainInputConstraint.addConstrainer(self.mainInputTgt)
self.mainCtrlSpace.addConstraint(self.mainInputConstraint)
# Constrain outputs
self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt.getName(), 'To', self.mainCtrl.getName()]))
self.mainOutputConstraint.addConstrainer(self.mainCtrl)
self.outputTgt.addConstraint(self.mainOutputConstraint)
# Constrain deformers
self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.getName(), 'To', self.mainCtrl.getName()]))
self.mainDefConstraint.addConstrainer(self.mainCtrl)
self.mainDef.addConstraint(self.mainDefConstraint)
# ===============
# Add Canvas Ops
# ===============
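        # (Intentionally empty for this component: the constraints above do
        #  all the work, so no Canvas/KL operators are created here.)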
Profiler.getInstance().pop()
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
        super(SimpleControlComponentRig, self).loadData(data)
ctrlSize = data.get('ctrlSize', 1.0)
ctrlXfo = data.get('ctrlXfo', Xfo())
# ================
# Resize Controls
# ================
self.mainCtrl.setShape('square')
self.mainCtrl.rotatePoints(90, 0, 0)
self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))
# =======================
# Set Control Transforms
# =======================
self.mainCtrlSpace.xfo = ctrlXfo
self.mainCtrl.xfo = ctrlXfo
# ============
# Set IO Xfos
# ============
self.mainInputTgt.xfo = ctrlXfo
self.mainDef.xfo = ctrlXfo
self.outputTgt.xfo = ctrlXfo
# ====================
# Evaluate Constraints
# ====================
self.mainInputConstraint.evaluate()
self.mainOutputConstraint.evaluate()
self.mainDefConstraint.evaluate()
# ====================
# Evaluate Canvas Ops
# ====================
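        # (Nothing to evaluate: this component creates no Canvas ops.)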
from kraken.core.kraken_system import KrakenSystem
ks = KrakenSystem.getInstance()
ks.registerComponent(SimpleControlComponentGuide)
ks.registerComponent(SimpleControlComponentRig)
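# ====================
# Usage Sketch
# ====================
# Added for illustration; not part of the original file. It assumes the
# classes above can be instantiated standalone, as standalone test scripts
# for Kraken components commonly do (an assumption); in a real build they
# live inside a Kraken Rig container. The component name and the values
# below are illustrative.
if __name__ == '__main__':
    # Author a guide, position and size it, then feed its build data to the
    # rig component, mirroring the Guide -> Rig workflow defined above.
    guide = SimpleControlComponentGuide('myControl')
    guide.loadData({'location': 'M',
                    'ctrlSize': 10.0,
                    'ctrlXfo': Xfo(tr=Vec3(0.0, 2.0, 0.0))})
    rig = SimpleControlComponentRig('myControl')
    rig.loadData(guide.getRigBuildData())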
|
flexible
|
{
"blob_id": "20167058697450f342c2ac3787bd1721f860dc58",
"index": 3169,
"step-1": "<mask token>\n\n\nclass SimpleControlComponentGuide(SimpleControlComponent):\n <mask token>\n\n def __init__(self, name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Guide Component:' + name)\n super(SimpleControlComponentGuide, self).__init__(name, parent)\n guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)\n self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,\n minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)\n self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)\n self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.\n getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}\n self.loadData(data)\n Profiler.getInstance().pop()\n\n def saveData(self):\n \"\"\"Save the data for the component to be persisted.\n\n Return:\n The JSON data object\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).saveData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def loadData(self, data):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentGuide, self).loadData(data)\n self.ctrlSizeInputAttr.setValue(data['ctrlSize'])\n self.mainCtrl.xfo = data['ctrlXfo']\n scaleValue = data['ctrlSize']\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))\n return True\n\n def getRigBuildData(self):\n \"\"\"Returns the Guide data used by the Rig Component to define the layout of the final rig.\n\n Return:\n The JSON rig data object.\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).getRigBuildData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def resizeMainCtrl(self, newSize):\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))\n\n @classmethod\n def getComponentType(cls):\n \"\"\"Enables introspection of the class prior to construction to determine if it is a guide component.\n\n Return:\n The true if this component is a guide component.\n\n \"\"\"\n return 'Guide'\n\n @classmethod\n def getRigComponentClass(cls):\n \"\"\"Returns the corresponding rig component class for this guide component class\n\n Return:\n The rig component class.\n\n \"\"\"\n return SimpleControlComponentRig\n\n\nclass SimpleControlComponentRig(SimpleControlComponent):\n \"\"\"Simple Control Component Rig\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Rig Component:' + name)\n super(SimpleControlComponentRig, self).__init__(name, parent)\n self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)\n self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()\n self.mainCtrl.lockScale(x=True, y=True, z=True)\n mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',\n parent=self.mainCtrl)\n self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=\n mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)\n self.rigScaleOutputAttr.connect(self.rigScaleAttr)\n deformersLayer = self.getOrCreateLayer('deformers')\n self.defCmpGrp = ComponentGroup(self.getName(), self, 
parent=\n deformersLayer)\n self.addItem('defCmpGrp', self.defCmpGrp)\n self.mainDef = Joint('main', parent=self.defCmpGrp)\n self.mainDef.setComponent(self)\n self.mainInputConstraint = PoseConstraint('_'.join([self.\n mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))\n self.mainInputConstraint.setMaintainOffset(True)\n self.mainInputConstraint.addConstrainer(self.mainInputTgt)\n self.mainCtrlSpace.addConstraint(self.mainInputConstraint)\n self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt\n .getName(), 'To', self.mainCtrl.getName()]))\n self.mainOutputConstraint.addConstrainer(self.mainCtrl)\n self.outputTgt.addConstraint(self.mainOutputConstraint)\n self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.\n getName(), 'To', self.mainCtrl.getName()]))\n self.mainDefConstraint.addConstrainer(self.mainCtrl)\n self.mainDef.addConstraint(self.mainDefConstraint)\n Profiler.getInstance().pop()\n\n def loadData(self, data=None):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentRig, self).loadData(data)\n ctrlSize = data.get('ctrlSize', 1.0)\n ctrlXfo = data.get('ctrlXfo', Xfo())\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))\n self.mainCtrlSpace.xfo = ctrlXfo\n self.mainCtrl.xfo = ctrlXfo\n self.mainInputTgt.xfo = ctrlXfo\n self.mainDef.xfo = ctrlXfo\n self.outputTgt.xfo = ctrlXfo\n self.mainInputConstraint.evaluate()\n self.mainOutputConstraint.evaluate()\n self.mainDefConstraint.evaluate()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SimpleControlComponent(BaseExampleComponent):\n <mask token>\n\n def __init__(self, name='SimpleControl', parent=None):\n super(SimpleControlComponent, self).__init__(name, parent)\n self.mainInputTgt = self.createInput('mainInput', dataType='Xfo',\n parent=self.inputHrcGrp).getTarget()\n self.outputTgt = self.createOutput('output', dataType='Xfo', parent\n =self.outputHrcGrp).getTarget()\n self.drawDebugInputAttr = self.createInput('drawDebug', dataType=\n 'Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()\n self.rigScaleOutputAttr = self.createOutput('rigScale', dataType=\n 'Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()\n\n\nclass SimpleControlComponentGuide(SimpleControlComponent):\n \"\"\"Simple Control Component Guide\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Guide Component:' + name)\n super(SimpleControlComponentGuide, self).__init__(name, parent)\n guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)\n self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,\n minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)\n self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)\n self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.\n getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}\n self.loadData(data)\n Profiler.getInstance().pop()\n\n def saveData(self):\n \"\"\"Save the data for the component to be persisted.\n\n Return:\n The JSON data object\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).saveData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def loadData(self, data):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentGuide, self).loadData(data)\n self.ctrlSizeInputAttr.setValue(data['ctrlSize'])\n self.mainCtrl.xfo = data['ctrlXfo']\n scaleValue = data['ctrlSize']\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))\n return True\n\n def getRigBuildData(self):\n \"\"\"Returns the Guide data used by the Rig Component to define the layout of the final rig.\n\n Return:\n The JSON rig data object.\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).getRigBuildData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def resizeMainCtrl(self, newSize):\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))\n\n @classmethod\n def getComponentType(cls):\n \"\"\"Enables introspection of the class prior to construction to determine if it is a guide component.\n\n Return:\n The true if this component is a guide component.\n\n \"\"\"\n return 'Guide'\n\n @classmethod\n def getRigComponentClass(cls):\n \"\"\"Returns the corresponding rig component class for this guide component class\n\n Return:\n The rig component class.\n\n \"\"\"\n return SimpleControlComponentRig\n\n\nclass SimpleControlComponentRig(SimpleControlComponent):\n \"\"\"Simple Control Component Rig\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n 
Profiler.getInstance().push(\n 'Construct Simple Control Rig Component:' + name)\n super(SimpleControlComponentRig, self).__init__(name, parent)\n self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)\n self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()\n self.mainCtrl.lockScale(x=True, y=True, z=True)\n mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',\n parent=self.mainCtrl)\n self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=\n mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)\n self.rigScaleOutputAttr.connect(self.rigScaleAttr)\n deformersLayer = self.getOrCreateLayer('deformers')\n self.defCmpGrp = ComponentGroup(self.getName(), self, parent=\n deformersLayer)\n self.addItem('defCmpGrp', self.defCmpGrp)\n self.mainDef = Joint('main', parent=self.defCmpGrp)\n self.mainDef.setComponent(self)\n self.mainInputConstraint = PoseConstraint('_'.join([self.\n mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))\n self.mainInputConstraint.setMaintainOffset(True)\n self.mainInputConstraint.addConstrainer(self.mainInputTgt)\n self.mainCtrlSpace.addConstraint(self.mainInputConstraint)\n self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt\n .getName(), 'To', self.mainCtrl.getName()]))\n self.mainOutputConstraint.addConstrainer(self.mainCtrl)\n self.outputTgt.addConstraint(self.mainOutputConstraint)\n self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.\n getName(), 'To', self.mainCtrl.getName()]))\n self.mainDefConstraint.addConstrainer(self.mainCtrl)\n self.mainDef.addConstraint(self.mainDefConstraint)\n Profiler.getInstance().pop()\n\n def loadData(self, data=None):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentRig, self).loadData(data)\n ctrlSize = data.get('ctrlSize', 1.0)\n ctrlXfo = data.get('ctrlXfo', Xfo())\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))\n self.mainCtrlSpace.xfo = ctrlXfo\n self.mainCtrl.xfo = ctrlXfo\n self.mainInputTgt.xfo = ctrlXfo\n self.mainDef.xfo = ctrlXfo\n self.outputTgt.xfo = ctrlXfo\n self.mainInputConstraint.evaluate()\n self.mainOutputConstraint.evaluate()\n self.mainDefConstraint.evaluate()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SimpleControlComponent(BaseExampleComponent):\n \"\"\"Simple Control Component Base\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n super(SimpleControlComponent, self).__init__(name, parent)\n self.mainInputTgt = self.createInput('mainInput', dataType='Xfo',\n parent=self.inputHrcGrp).getTarget()\n self.outputTgt = self.createOutput('output', dataType='Xfo', parent\n =self.outputHrcGrp).getTarget()\n self.drawDebugInputAttr = self.createInput('drawDebug', dataType=\n 'Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()\n self.rigScaleOutputAttr = self.createOutput('rigScale', dataType=\n 'Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()\n\n\nclass SimpleControlComponentGuide(SimpleControlComponent):\n \"\"\"Simple Control Component Guide\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Guide Component:' + name)\n super(SimpleControlComponentGuide, self).__init__(name, parent)\n guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)\n self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,\n minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)\n self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)\n self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.\n getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}\n self.loadData(data)\n Profiler.getInstance().pop()\n\n def saveData(self):\n \"\"\"Save the data for the component to be persisted.\n\n Return:\n The JSON data object\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).saveData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def loadData(self, data):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentGuide, self).loadData(data)\n self.ctrlSizeInputAttr.setValue(data['ctrlSize'])\n self.mainCtrl.xfo = data['ctrlXfo']\n scaleValue = data['ctrlSize']\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))\n return True\n\n def getRigBuildData(self):\n \"\"\"Returns the Guide data used by the Rig Component to define the layout of the final rig.\n\n Return:\n The JSON rig data object.\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).getRigBuildData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def resizeMainCtrl(self, newSize):\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))\n\n @classmethod\n def getComponentType(cls):\n \"\"\"Enables introspection of the class prior to construction to determine if it is a guide component.\n\n Return:\n The true if this component is a guide component.\n\n \"\"\"\n return 'Guide'\n\n @classmethod\n def getRigComponentClass(cls):\n \"\"\"Returns the corresponding rig component class for this guide component class\n\n Return:\n The rig component class.\n\n \"\"\"\n return SimpleControlComponentRig\n\n\nclass SimpleControlComponentRig(SimpleControlComponent):\n \"\"\"Simple Control Component Rig\"\"\"\n\n def __init__(self, 
name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Rig Component:' + name)\n super(SimpleControlComponentRig, self).__init__(name, parent)\n self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)\n self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()\n self.mainCtrl.lockScale(x=True, y=True, z=True)\n mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',\n parent=self.mainCtrl)\n self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=\n mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)\n self.rigScaleOutputAttr.connect(self.rigScaleAttr)\n deformersLayer = self.getOrCreateLayer('deformers')\n self.defCmpGrp = ComponentGroup(self.getName(), self, parent=\n deformersLayer)\n self.addItem('defCmpGrp', self.defCmpGrp)\n self.mainDef = Joint('main', parent=self.defCmpGrp)\n self.mainDef.setComponent(self)\n self.mainInputConstraint = PoseConstraint('_'.join([self.\n mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))\n self.mainInputConstraint.setMaintainOffset(True)\n self.mainInputConstraint.addConstrainer(self.mainInputTgt)\n self.mainCtrlSpace.addConstraint(self.mainInputConstraint)\n self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt\n .getName(), 'To', self.mainCtrl.getName()]))\n self.mainOutputConstraint.addConstrainer(self.mainCtrl)\n self.outputTgt.addConstraint(self.mainOutputConstraint)\n self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.\n getName(), 'To', self.mainCtrl.getName()]))\n self.mainDefConstraint.addConstrainer(self.mainCtrl)\n self.mainDef.addConstraint(self.mainDefConstraint)\n Profiler.getInstance().pop()\n\n def loadData(self, data=None):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentRig, self).loadData(data)\n ctrlSize = data.get('ctrlSize', 1.0)\n ctrlXfo = data.get('ctrlXfo', Xfo())\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))\n self.mainCtrlSpace.xfo = ctrlXfo\n self.mainCtrl.xfo = ctrlXfo\n self.mainInputTgt.xfo = ctrlXfo\n self.mainDef.xfo = ctrlXfo\n self.outputTgt.xfo = ctrlXfo\n self.mainInputConstraint.evaluate()\n self.mainOutputConstraint.evaluate()\n self.mainDefConstraint.evaluate()\n\n\n<mask token>\nks.registerComponent(SimpleControlComponentGuide)\nks.registerComponent(SimpleControlComponentRig)\n",
"step-4": "<mask token>\n\n\nclass SimpleControlComponent(BaseExampleComponent):\n \"\"\"Simple Control Component Base\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n super(SimpleControlComponent, self).__init__(name, parent)\n self.mainInputTgt = self.createInput('mainInput', dataType='Xfo',\n parent=self.inputHrcGrp).getTarget()\n self.outputTgt = self.createOutput('output', dataType='Xfo', parent\n =self.outputHrcGrp).getTarget()\n self.drawDebugInputAttr = self.createInput('drawDebug', dataType=\n 'Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()\n self.rigScaleOutputAttr = self.createOutput('rigScale', dataType=\n 'Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()\n\n\nclass SimpleControlComponentGuide(SimpleControlComponent):\n \"\"\"Simple Control Component Guide\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Guide Component:' + name)\n super(SimpleControlComponentGuide, self).__init__(name, parent)\n guideSettingsAttrGrp = AttributeGroup('GuideSettings', parent=self)\n self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0,\n minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)\n self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)\n self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n data = {'location': 'M', 'ctrlSize': self.ctrlSizeInputAttr.\n getValue(), 'ctrlXfo': Xfo(tr=Vec3(0.0, 0.0, 0.0))}\n self.loadData(data)\n Profiler.getInstance().pop()\n\n def saveData(self):\n \"\"\"Save the data for the component to be persisted.\n\n Return:\n The JSON data object\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).saveData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def loadData(self, data):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentGuide, self).loadData(data)\n self.ctrlSizeInputAttr.setValue(data['ctrlSize'])\n self.mainCtrl.xfo = data['ctrlXfo']\n scaleValue = data['ctrlSize']\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))\n return True\n\n def getRigBuildData(self):\n \"\"\"Returns the Guide data used by the Rig Component to define the layout of the final rig.\n\n Return:\n The JSON rig data object.\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).getRigBuildData()\n data['ctrlSize'] = self.ctrlSizeInputAttr.getValue()\n data['ctrlXfo'] = self.mainCtrl.xfo\n return data\n\n def resizeMainCtrl(self, newSize):\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))\n\n @classmethod\n def getComponentType(cls):\n \"\"\"Enables introspection of the class prior to construction to determine if it is a guide component.\n\n Return:\n The true if this component is a guide component.\n\n \"\"\"\n return 'Guide'\n\n @classmethod\n def getRigComponentClass(cls):\n \"\"\"Returns the corresponding rig component class for this guide component class\n\n Return:\n The rig component class.\n\n \"\"\"\n return SimpleControlComponentRig\n\n\nclass SimpleControlComponentRig(SimpleControlComponent):\n \"\"\"Simple Control Component Rig\"\"\"\n\n def __init__(self, 
name='SimpleControl', parent=None):\n Profiler.getInstance().push(\n 'Construct Simple Control Rig Component:' + name)\n super(SimpleControlComponentRig, self).__init__(name, parent)\n self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)\n self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()\n self.mainCtrl.lockScale(x=True, y=True, z=True)\n mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings',\n parent=self.mainCtrl)\n self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=\n mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)\n self.rigScaleOutputAttr.connect(self.rigScaleAttr)\n deformersLayer = self.getOrCreateLayer('deformers')\n self.defCmpGrp = ComponentGroup(self.getName(), self, parent=\n deformersLayer)\n self.addItem('defCmpGrp', self.defCmpGrp)\n self.mainDef = Joint('main', parent=self.defCmpGrp)\n self.mainDef.setComponent(self)\n self.mainInputConstraint = PoseConstraint('_'.join([self.\n mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))\n self.mainInputConstraint.setMaintainOffset(True)\n self.mainInputConstraint.addConstrainer(self.mainInputTgt)\n self.mainCtrlSpace.addConstraint(self.mainInputConstraint)\n self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt\n .getName(), 'To', self.mainCtrl.getName()]))\n self.mainOutputConstraint.addConstrainer(self.mainCtrl)\n self.outputTgt.addConstraint(self.mainOutputConstraint)\n self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.\n getName(), 'To', self.mainCtrl.getName()]))\n self.mainDefConstraint.addConstrainer(self.mainCtrl)\n self.mainDef.addConstraint(self.mainDefConstraint)\n Profiler.getInstance().pop()\n\n def loadData(self, data=None):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n super(SimpleControlComponentRig, self).loadData(data)\n ctrlSize = data.get('ctrlSize', 1.0)\n ctrlXfo = data.get('ctrlXfo', Xfo())\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))\n self.mainCtrlSpace.xfo = ctrlXfo\n self.mainCtrl.xfo = ctrlXfo\n self.mainInputTgt.xfo = ctrlXfo\n self.mainDef.xfo = ctrlXfo\n self.outputTgt.xfo = ctrlXfo\n self.mainInputConstraint.evaluate()\n self.mainOutputConstraint.evaluate()\n self.mainDefConstraint.evaluate()\n\n\n<mask token>\nks = KrakenSystem.getInstance()\nks.registerComponent(SimpleControlComponentGuide)\nks.registerComponent(SimpleControlComponentRig)\n",
"step-5": "from kraken.core.maths import Vec3, Vec3, Euler, Quat, Xfo\n\nfrom kraken.core.objects.components.base_example_component import BaseExampleComponent\n\nfrom kraken.core.objects.attributes.attribute_group import AttributeGroup\nfrom kraken.core.objects.attributes.scalar_attribute import ScalarAttribute\nfrom kraken.core.objects.attributes.bool_attribute import BoolAttribute\n\nfrom kraken.core.objects.constraints.pose_constraint import PoseConstraint\n\nfrom kraken.core.objects.component_group import ComponentGroup\nfrom kraken.core.objects.hierarchy_group import HierarchyGroup\nfrom kraken.core.objects.locator import Locator\nfrom kraken.core.objects.joint import Joint\nfrom kraken.core.objects.ctrlSpace import CtrlSpace\nfrom kraken.core.objects.control import Control\n\nfrom kraken.core.objects.operators.kl_operator import KLOperator\n\nfrom kraken.core.profiler import Profiler\nfrom kraken.helpers.utility_methods import logHierarchy\n\n\nclass SimpleControlComponent(BaseExampleComponent):\n \"\"\"Simple Control Component Base\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n super(SimpleControlComponent, self).__init__(name, parent)\n\n\n # ===========\n # Declare IO\n # ===========\n # Declare Inputs Xfos\n self.mainInputTgt = self.createInput('mainInput', dataType='Xfo', parent=self.inputHrcGrp).getTarget()\n\n # Declare Output Xfos\n self.outputTgt = self.createOutput('output', dataType='Xfo', parent=self.outputHrcGrp).getTarget()\n\n # Declare Input Attrs\n self.drawDebugInputAttr = self.createInput('drawDebug', dataType='Boolean', value=False, parent=self.cmpInputAttrGrp).getTarget()\n\n # Declare Output Attrs\n self.rigScaleOutputAttr = self.createOutput('rigScale', dataType='Float', value=1.0, parent=self.cmpOutputAttrGrp).getTarget()\n\n\nclass SimpleControlComponentGuide(SimpleControlComponent):\n \"\"\"Simple Control Component Guide\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n\n Profiler.getInstance().push(\"Construct Simple Control Guide Component:\" + name)\n super(SimpleControlComponentGuide, self).__init__(name, parent)\n\n # =========\n # Attributes\n # =========\n # Add Component Params to IK control\n guideSettingsAttrGrp = AttributeGroup(\"GuideSettings\", parent=self)\n\n self.ctrlSizeInputAttr = ScalarAttribute('ctrlSize', value=5.0, minValue=1.0, maxValue=50.0, parent=guideSettingsAttrGrp)\n self.ctrlSizeInputAttr.setValueChangeCallback(self.resizeMainCtrl)\n\n # =========\n # Controls\n # =========\n\n # Guide Controls\n self.mainCtrl = Control('main', parent=self.ctrlCmpGrp, shape='square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n\n data = {\n \"location\": 'M',\n \"ctrlSize\": self.ctrlSizeInputAttr.getValue(),\n \"ctrlXfo\": Xfo(tr=Vec3(0.0, 0.0, 0.0))\n }\n\n self.loadData(data)\n\n Profiler.getInstance().pop()\n\n\n # =============\n # Data Methods\n # =============\n def saveData(self):\n \"\"\"Save the data for the component to be persisted.\n\n Return:\n The JSON data object\n\n \"\"\"\n data = super(SimpleControlComponentGuide, self).saveData()\n\n data[\"ctrlSize\"] = self.ctrlSizeInputAttr.getValue()\n data[\"ctrlXfo\"] = self.mainCtrl.xfo\n\n return data\n\n\n def loadData(self, data):\n \"\"\"Load a saved guide representation from persisted data.\n\n Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n\n super(SimpleControlComponentGuide, self).loadData( data )\n\n self.ctrlSizeInputAttr.setValue(data[\"ctrlSize\"])\n self.mainCtrl.xfo = data[\"ctrlXfo\"]\n\n 
scaleValue = data[\"ctrlSize\"]\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(scaleValue, scaleValue, scaleValue))\n\n return True\n\n\n def getRigBuildData(self):\n \"\"\"Returns the Guide data used by the Rig Component to define the layout of the final rig.\n\n Return:\n The JSON rig data object.\n\n \"\"\"\n\n data = super(SimpleControlComponentGuide, self).getRigBuildData()\n\n data[\"ctrlSize\"] = self.ctrlSizeInputAttr.getValue()\n data[\"ctrlXfo\"] = self.mainCtrl.xfo\n\n return data\n\n # ==========\n # Callbacks\n # ==========\n def resizeMainCtrl(self, newSize):\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(newSize, newSize, newSize))\n\n # ==============\n # Class Methods\n # ==============\n @classmethod\n def getComponentType(cls):\n \"\"\"Enables introspection of the class prior to construction to determine if it is a guide component.\n\n Return:\n The true if this component is a guide component.\n\n \"\"\"\n\n return 'Guide'\n\n @classmethod\n def getRigComponentClass(cls):\n \"\"\"Returns the corresponding rig component class for this guide component class\n\n Return:\n The rig component class.\n\n \"\"\"\n\n return SimpleControlComponentRig\n\n\nclass SimpleControlComponentRig(SimpleControlComponent):\n \"\"\"Simple Control Component Rig\"\"\"\n\n def __init__(self, name='SimpleControl', parent=None):\n\n Profiler.getInstance().push(\"Construct Simple Control Rig Component:\" + name)\n super(SimpleControlComponentRig, self).__init__(name, parent)\n\n\n # =========\n # Controls\n # =========\n # Add Controls\n self.mainCtrl = Control('main', shape='square', parent=self.ctrlCmpGrp)\n self.mainCtrlSpace = self.mainCtrl.insertCtrlSpace()\n self.mainCtrl.lockScale(x=True, y=True, z=True)\n\n # Add Component Params to Main control\n mainSrtSettingsAttrGrp = AttributeGroup('DisplayInfo_MainSrtSettings', parent=self.mainCtrl)\n self.rigScaleAttr = ScalarAttribute('rigScale', value=1.0, parent=mainSrtSettingsAttrGrp, minValue=0.1, maxValue=100.0)\n\n self.rigScaleOutputAttr.connect(self.rigScaleAttr)\n\n # ==========\n # Deformers\n # ==========\n deformersLayer = self.getOrCreateLayer('deformers')\n self.defCmpGrp = ComponentGroup(self.getName(), self, parent=deformersLayer)\n self.addItem('defCmpGrp', self.defCmpGrp)\n self.mainDef = Joint('main', parent=self.defCmpGrp)\n self.mainDef.setComponent(self)\n\n # ==============\n # Constrain I/O\n # ==============\n # Constrain inputs\n self.mainInputConstraint = PoseConstraint('_'.join([self.mainCtrlSpace.getName(), 'To', self.mainInputTgt.getName()]))\n self.mainInputConstraint.setMaintainOffset(True)\n self.mainInputConstraint.addConstrainer(self.mainInputTgt)\n self.mainCtrlSpace.addConstraint(self.mainInputConstraint)\n\n # Constrain outputs\n self.mainOutputConstraint = PoseConstraint('_'.join([self.outputTgt.getName(), 'To', self.mainCtrl.getName()]))\n self.mainOutputConstraint.addConstrainer(self.mainCtrl)\n self.outputTgt.addConstraint(self.mainOutputConstraint)\n\n # Constrain deformers\n self.mainDefConstraint = PoseConstraint('_'.join([self.mainDef.getName(), 'To', self.mainCtrl.getName()]))\n self.mainDefConstraint.addConstrainer(self.mainCtrl)\n self.mainDef.addConstraint(self.mainDefConstraint)\n\n # ===============\n # Add Canvas Ops\n # ===============\n\n Profiler.getInstance().pop()\n\n\n def loadData(self, data=None):\n \"\"\"Load a saved guide representation from persisted data.\n\n 
Arguments:\n data -- object, The JSON data object.\n\n Return:\n True if successful.\n\n \"\"\"\n\n super(SimpleControlComponentRig, self).loadData( data )\n\n ctrlSize = data.get('ctrlSize', 1.0)\n ctrlXfo = data.get('ctrlXfo', Xfo())\n\n # ================\n # Resize Controls\n # ================\n self.mainCtrl.setShape('square')\n self.mainCtrl.rotatePoints(90, 0, 0)\n self.mainCtrl.scalePoints(Vec3(ctrlSize, ctrlSize, ctrlSize))\n\n # =======================\n # Set Control Transforms\n # =======================\n self.mainCtrlSpace.xfo = ctrlXfo\n self.mainCtrl.xfo = ctrlXfo\n\n # ============\n # Set IO Xfos\n # ============\n self.mainInputTgt.xfo = ctrlXfo\n self.mainDef.xfo = ctrlXfo\n self.outputTgt.xfo = ctrlXfo\n\n # ====================\n # Evaluate Constraints\n # ====================\n self.mainInputConstraint.evaluate()\n self.mainOutputConstraint.evaluate()\n self.mainDefConstraint.evaluate()\n\n # ====================\n # Evaluate Canvas Ops\n # ====================\n\n\nfrom kraken.core.kraken_system import KrakenSystem\nks = KrakenSystem.getInstance()\nks.registerComponent(SimpleControlComponentGuide)\nks.registerComponent(SimpleControlComponentRig)\n",
"step-ids": [
12,
15,
17,
18,
20
]
}
|
[
12,
15,
17,
18,
20
] |
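A minimal sketch of driving the guide component above, assuming the kraken package from this record is importable; the import, class name, and dict keys come from the record's saveData()/loadData() code, everything else is illustrative:

# Sketch only, not part of the original record.
from kraken.core.maths import Vec3, Xfo

data = {
    "location": "M",                        # keys mirror the guide's saveData()/loadData() dict
    "ctrlSize": 5.0,
    "ctrlXfo": Xfo(tr=Vec3(0.0, 1.0, 0.0)),
}
# guide = SimpleControlComponentGuide("demo")  # hypothetical instance
# guide.loadData(data)                         # scales and places the square control
# assert guide.saveData()["ctrlSize"] == 5.0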
from collections import defaultdict


def del_ops3(str1, str2):
    substring = []  # common subsequence found; stays empty in the trivial branches below
# find all common letters in both strings
common1 = [x for x in str1 if x in str2]
common2 = [x for x in str2 if x in str1]
if len(common2) < len(common1):
common1, common2 = common2, common1
    # trivial cases: compute the deletion total directly when 0 or 1 letters are shared, or when both share exactly 2 letters that differ
if len(common1) == 0 or len(common2) == 0:
total = len(str1) + len(str2)
elif (len(common1) == 1 or len(common2) == 1) or (len(common1) == 2 and len(common2) == 2 and common1 != common2):
total = (len(str1) - 1) + (len(str2) - 1)
    # else: both have the same 2 common letters, or more than 2 letters are in common
else:
# create references to c2 indexes of each letter in c1
refs = defaultdict(list)
for i, letter in enumerate(common2):
refs[letter].append(i)
# find all letters that follow each other (same order) in both strings
substring = [] # substring == all common letters in same sequence in both strings
previous = min(refs[common1[0]])
for i, letter in enumerate(common1):
# if any c2 index of the current letter in c1 is > the c2 index of previous letter:
# the current letter follows the previous letter in both c1 and c2
if any([i > previous for i in refs[letter]]) and all([i != previous for i in refs[letter]]):
# if the same letter at the same index is not already in substring:
if all([hash(x) != hash(common2[previous]) for x in substring]):
substring.append(common2[previous])
substring.append(letter)
previous = min([x for x in refs[letter] if x >= previous])
# next iteration of previous is always == the smallest index
# of the current letter that is >= current iteration of previous
# (always > previous if not first iteration in c1)
# indexes are never repeated or skipped
# elif the letter does not follow the same letter in both strings:
# previous = smallest c2 index of letter that broke sequence/did not follow in both strings
            elif all(i < previous for i in refs[letter]):
                previous = min(refs[letter])
print(i, previous, letter, substring)
# total == total of all letters - (number of letters in substring * 2)
total = (len(str1) - len(substring)) + (len(str2) - len(substring))
return "".join(substring)
|
normal
|
{
"blob_id": "f9d1013fa278b9078e603b012abbdde0be2e0962",
"index": 7926,
"step-1": "<mask token>\n",
"step-2": "def del_ops3(str1, str2):\n common1 = [x for x in str1 if x in str2]\n common2 = [x for x in str2 if x in str1]\n if len(common2) < len(common1):\n common1, common2 = common2, common1\n if len(common1) == 0 or len(common2) == 0:\n total = len(str1) + len(str2)\n elif (len(common1) == 1 or len(common2) == 1) or len(common1) == 2 and len(\n common2) == 2 and common1 != common2:\n total = len(str1) - 1 + (len(str2) - 1)\n else:\n refs = defaultdict(list)\n for i, letter in enumerate(common2):\n refs[letter].append(i)\n substring = []\n previous = min(refs[common1[0]])\n for i, letter in enumerate(common1):\n if any([(i > previous) for i in refs[letter]]) and all([(i !=\n previous) for i in refs[letter]]):\n if all([(hash(x) != hash(common2[previous])) for x in\n substring]):\n substring.append(common2[previous])\n substring.append(letter)\n previous = min([x for x in refs[letter] if x >= previous])\n elif all(refs[letter]) < previous:\n previous = min([x for x in refs[letter]])\n print(i, previous, letter, substring)\n total = len(str1) - len(substring) + (len(str2) - len(substring))\n return ''.join(substring)\n",
"step-3": "def del_ops3(str1, str2):\n\n # find all common letters in both strings\n common1 = [x for x in str1 if x in str2]\n common2 = [x for x in str2 if x in str1]\n if len(common2) < len(common1):\n common1, common2 = common2, common1\n\n # find total of strings with 0, 1, or 2 characters, (2 chars - only if c1 != c2)\n if len(common1) == 0 or len(common2) == 0:\n total = len(str1) + len(str2)\n elif (len(common1) == 1 or len(common2) == 1) or (len(common1) == 2 and len(common2) == 2 and common1 != common2):\n total = (len(str1) - 1) + (len(str2) - 1)\n\n # else, if 2 characters in c1, c2 and c1 != c2 or > 2 characters in c1, c2\n else:\n\n # create references to c2 indexes of each letter in c1\n refs = defaultdict(list)\n for i, letter in enumerate(common2):\n refs[letter].append(i)\n\n # find all letters that follow each other (same order) in both strings\n substring = [] # substring == all common letters in same sequence in both strings\n previous = min(refs[common1[0]])\n for i, letter in enumerate(common1):\n\n # if any c2 index of the current letter in c1 is > the c2 index of previous letter:\n # the current letter follows the previous letter in both c1 and c2\n if any([i > previous for i in refs[letter]]) and all([i != previous for i in refs[letter]]):\n\n # if the same letter at the same index is not already in substring:\n if all([hash(x) != hash(common2[previous]) for x in substring]):\n substring.append(common2[previous])\n\n substring.append(letter)\n previous = min([x for x in refs[letter] if x >= previous])\n # next iteration of previous is always == the smallest index\n # of the current letter that is >= current iteration of previous\n # (always > previous if not first iteration in c1)\n # indexes are never repeated or skipped\n\n # elif the letter does not follow the same letter in both strings:\n # previous = smallest c2 index of letter that broke sequence/did not follow in both strings\n elif all(refs[letter]) < previous:\n previous = min([x for x in refs[letter]])\n print(i, previous, letter, substring)\n # total == total of all letters - (number of letters in substring * 2)\n total = (len(str1) - len(substring)) + (len(str2) - len(substring))\n\n return \"\".join(substring)\n \n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
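The greedy scan above approximates the classic minimum-deletions problem; the standard approach computes the longest common subsequence (LCS) with dynamic programming. A minimal sketch, assuming that task is the goal (function name and test strings are illustrative):

def min_deletions(s1, s2):
    m, n = len(s1), len(s2)
    # dp[i][j] = length of the longest common subsequence of s1[:i] and s2[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return m + n - 2 * dp[m][n]

print(min_deletions("sea", "eat"))  # 2: delete 's' from "sea" and 't' from "eat"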
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(md5.hexdigest())
<|reserved_special_token_0|>
print(sha1.hexdigest())
<|reserved_special_token_0|>
print(sha224.hexdigest())
<|reserved_special_token_0|>
print(sha256.hexdigest())
<|reserved_special_token_0|>
print(sha384.hexdigest())
<|reserved_special_token_0|>
print(sha512.hexdigest())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
md5 = hashlib.md5(b'Najmul')
print(md5.hexdigest())
sha1 = hashlib.sha1(b'Najmul')
print(sha1.hexdigest())
sha224 = hashlib.sha224(b'Najmul')
print(sha224.hexdigest())
sha256 = hashlib.sha256(b'Najmul')
print(sha256.hexdigest())
sha384 = hashlib.sha384(b'Najmul')
print(sha384.hexdigest())
sha512 = hashlib.sha512(b'Najmul')
print(sha512.hexdigest())
<|reserved_special_token_1|>
import hashlib
md5 = hashlib.md5(b'Najmul')
print(md5.hexdigest())
sha1 = hashlib.sha1(b'Najmul')
print(sha1.hexdigest())
sha224 = hashlib.sha224(b'Najmul')
print(sha224.hexdigest())
sha256 = hashlib.sha256(b'Najmul')
print(sha256.hexdigest())
sha384 = hashlib.sha384(b'Najmul')
print(sha384.hexdigest())
sha512 = hashlib.sha512(b'Najmul')
print(sha512.hexdigest())
|
flexible
|
{
"blob_id": "ab4c668c8a167f8c387199b7aa49aa742d563250",
"index": 1698,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(md5.hexdigest())\n<mask token>\nprint(sha1.hexdigest())\n<mask token>\nprint(sha224.hexdigest())\n<mask token>\nprint(sha256.hexdigest())\n<mask token>\nprint(sha384.hexdigest())\n<mask token>\nprint(sha512.hexdigest())\n",
"step-3": "<mask token>\nmd5 = hashlib.md5(b'Najmul')\nprint(md5.hexdigest())\nsha1 = hashlib.sha1(b'Najmul')\nprint(sha1.hexdigest())\nsha224 = hashlib.sha224(b'Najmul')\nprint(sha224.hexdigest())\nsha256 = hashlib.sha256(b'Najmul')\nprint(sha256.hexdigest())\nsha384 = hashlib.sha384(b'Najmul')\nprint(sha384.hexdigest())\nsha512 = hashlib.sha512(b'Najmul')\nprint(sha512.hexdigest())\n",
"step-4": "import hashlib\nmd5 = hashlib.md5(b'Najmul')\nprint(md5.hexdigest())\nsha1 = hashlib.sha1(b'Najmul')\nprint(sha1.hexdigest())\nsha224 = hashlib.sha224(b'Najmul')\nprint(sha224.hexdigest())\nsha256 = hashlib.sha256(b'Najmul')\nprint(sha256.hexdigest())\nsha384 = hashlib.sha384(b'Najmul')\nprint(sha384.hexdigest())\nsha512 = hashlib.sha512(b'Najmul')\nprint(sha512.hexdigest())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
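All six constructors above also accept data incrementally through update(), which is how large inputs are normally hashed; a small self-contained check of the equivalence:

import hashlib

h = hashlib.sha256()
for chunk in (b"Naj", b"mul"):   # in practice: for chunk in iter(lambda: f.read(8192), b"")
    h.update(chunk)
assert h.hexdigest() == hashlib.sha256(b"Najmul").hexdigest()
print(h.hexdigest())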
#Peptide Encoding Problem: Find substrings of a genome encoding a given amino acid sequence.
# Input: A DNA string Text, an amino acid string Peptide, and the array GeneticCode.
# Output: All substrings of Text encoding Peptide (if any such substrings exist).
def reverse_string(seq):
return seq[::-1]
def complement(seq):
#return the complementary sequence string.
seq=seq.upper()
basecomplement={"A":"T","C":"G","G":"C","T":"A","N":"N"}
letters=list(seq)
letters=[basecomplement[base] for base in letters]
return ''.join(letters)
def reversecomplement(seq):
#return the reverse complement of the dna string.
seq=reverse_string(seq)
seq=complement(seq)
return seq
def DNA_To_AA(seq):
RNA_AA_dict = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA': '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H',
'CGT': 'R', 'CGG': 'R', 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG': 'Q',
'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I', 'CAT': 'H', 'GGC': 'G', 'GGG': 'G',
'GCT': 'A', 'GAT': 'D', 'GCA': 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',
'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG': 'W', 'AGA': 'R', 'TTT': 'F',
'TAG': '*', 'TGC': 'C', 'GGA': 'G', 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V',
'CGC': 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E', 'TCT': 'S', 'ATT': 'I',
'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA': 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}
F_position = 0
R_position = 0
Aa=""
for i in range(int(len(seq) / 3)):
F_position = i*3
R_position = F_position+3
RNA_one=seq[F_position:R_position]
#if RNA_one == "TAA" or RNA_one == "TAG" or RNA_one == "TGA":
# break
Aa += RNA_AA_dict[RNA_one]
return Aa
def Peptide_Encoding(DNA,AA_input):
AA= DNA_To_AA(DNA)
print(AA)
l=len(AA_input)
return_DNA=[]
find_position=0
#print(DNA,AA,l,return_DNA,find_position)
while AA_input in AA[find_position:]:
#print(AA[find_position:])
AA_position = find_position + AA[find_position:].find(AA_input)
DNA_position = 3 * AA_position
#print(AA_position, DNA_position)
return_DNA.append(DNA[DNA_position:DNA_position + 3 * l])
find_position = AA_position + 1
#print(find_position)
return return_DNA
DNA=""
filename = input("Enter file name: ")
fileread = open(filename, "r")
for i in fileread:
read = i.strip()
DNA+=read.upper()
print(DNA[:200])
F_position=0
R_position=0
Aa_input=input("what is the aa?")
DNA_F_1=DNA
print1=Peptide_Encoding(DNA_F_1,Aa_input)
if print1!=[]:
for i in print1:
print("1",i)
DNA_F_2=DNA[1:]
print2=Peptide_Encoding(DNA_F_2,Aa_input)
if print2!=[]:
for i in print2:
print("2",i)
DNA_F_3=DNA[2:]
print3=Peptide_Encoding(DNA_F_3,Aa_input)
if print3!=[]:
for i in print3:
print("3",i)
RC_DNA=reversecomplement(DNA)
DNA_R_1=RC_DNA
print4=Peptide_Encoding(DNA_R_1,Aa_input)
if print4!=[]:
for i in print4:
print("4",reversecomplement(i))
DNA_R_2=RC_DNA[1:]
print5=Peptide_Encoding(DNA_R_2,Aa_input)
if print5!=[]:
for i in print5:
print("5",reversecomplement(i))
DNA_R_3=RC_DNA[2:]
print6=Peptide_Encoding(DNA_R_3,Aa_input)
if print6!=[]:
for i in print6:
print("6",reversecomplement(i))
#print(DNA_F_1,DNA_F_2,DNA_F_3,DNA_R_1,DNA_R_2,DNA_R_3)
#print(Aa_F_1,Aa_F_2,Aa_F_3,Aa_R_1,Aa_R_2,Aa_R_3)
#Search the genome for sequences matching the given amino-acid sequence
#Bacillus brevis.txt
#VKLFPWFNQY
|
normal
|
{
"blob_id": "0f2d215a34758f85a29ef7ed8264fccd5e85b66f",
"index": 3017,
"step-1": "def reverse_string(seq):\n return seq[::-1]\n\n\ndef complement(seq):\n seq = seq.upper()\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(seq)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n\n\ndef reversecomplement(seq):\n seq = reverse_string(seq)\n seq = complement(seq)\n return seq\n\n\ndef DNA_To_AA(seq):\n RNA_AA_dict = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA':\n '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H', 'CGT': 'R', 'CGG': 'R',\n 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG':\n 'Q', 'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I',\n 'CAT': 'H', 'GGC': 'G', 'GGG': 'G', 'GCT': 'A', 'GAT': 'D', 'GCA':\n 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',\n 'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG':\n 'W', 'AGA': 'R', 'TTT': 'F', 'TAG': '*', 'TGC': 'C', 'GGA': 'G',\n 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V', 'CGC':\n 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E',\n 'TCT': 'S', 'ATT': 'I', 'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA':\n 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}\n F_position = 0\n R_position = 0\n Aa = ''\n for i in range(int(len(seq) / 3)):\n F_position = i * 3\n R_position = F_position + 3\n RNA_one = seq[F_position:R_position]\n Aa += RNA_AA_dict[RNA_one]\n return Aa\n\n\n<mask token>\n",
"step-2": "def reverse_string(seq):\n return seq[::-1]\n\n\ndef complement(seq):\n seq = seq.upper()\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(seq)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n\n\ndef reversecomplement(seq):\n seq = reverse_string(seq)\n seq = complement(seq)\n return seq\n\n\ndef DNA_To_AA(seq):\n RNA_AA_dict = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA':\n '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H', 'CGT': 'R', 'CGG': 'R',\n 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG':\n 'Q', 'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I',\n 'CAT': 'H', 'GGC': 'G', 'GGG': 'G', 'GCT': 'A', 'GAT': 'D', 'GCA':\n 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',\n 'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG':\n 'W', 'AGA': 'R', 'TTT': 'F', 'TAG': '*', 'TGC': 'C', 'GGA': 'G',\n 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V', 'CGC':\n 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E',\n 'TCT': 'S', 'ATT': 'I', 'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA':\n 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}\n F_position = 0\n R_position = 0\n Aa = ''\n for i in range(int(len(seq) / 3)):\n F_position = i * 3\n R_position = F_position + 3\n RNA_one = seq[F_position:R_position]\n Aa += RNA_AA_dict[RNA_one]\n return Aa\n\n\ndef Peptide_Encoding(DNA, AA_input):\n AA = DNA_To_AA(DNA)\n print(AA)\n l = len(AA_input)\n return_DNA = []\n find_position = 0\n while AA_input in AA[find_position:]:\n AA_position = find_position + AA[find_position:].find(AA_input)\n DNA_position = 3 * AA_position\n return_DNA.append(DNA[DNA_position:DNA_position + 3 * l])\n find_position = AA_position + 1\n return return_DNA\n\n\n<mask token>\n",
"step-3": "def reverse_string(seq):\n return seq[::-1]\n\n\ndef complement(seq):\n seq = seq.upper()\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(seq)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n\n\ndef reversecomplement(seq):\n seq = reverse_string(seq)\n seq = complement(seq)\n return seq\n\n\ndef DNA_To_AA(seq):\n RNA_AA_dict = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA':\n '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H', 'CGT': 'R', 'CGG': 'R',\n 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG':\n 'Q', 'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I',\n 'CAT': 'H', 'GGC': 'G', 'GGG': 'G', 'GCT': 'A', 'GAT': 'D', 'GCA':\n 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',\n 'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG':\n 'W', 'AGA': 'R', 'TTT': 'F', 'TAG': '*', 'TGC': 'C', 'GGA': 'G',\n 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V', 'CGC':\n 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E',\n 'TCT': 'S', 'ATT': 'I', 'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA':\n 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}\n F_position = 0\n R_position = 0\n Aa = ''\n for i in range(int(len(seq) / 3)):\n F_position = i * 3\n R_position = F_position + 3\n RNA_one = seq[F_position:R_position]\n Aa += RNA_AA_dict[RNA_one]\n return Aa\n\n\ndef Peptide_Encoding(DNA, AA_input):\n AA = DNA_To_AA(DNA)\n print(AA)\n l = len(AA_input)\n return_DNA = []\n find_position = 0\n while AA_input in AA[find_position:]:\n AA_position = find_position + AA[find_position:].find(AA_input)\n DNA_position = 3 * AA_position\n return_DNA.append(DNA[DNA_position:DNA_position + 3 * l])\n find_position = AA_position + 1\n return return_DNA\n\n\n<mask token>\nfor i in fileread:\n read = i.strip()\n DNA += read.upper()\nprint(DNA[:200])\n<mask token>\nif print1 != []:\n for i in print1:\n print('1', i)\n<mask token>\nif print2 != []:\n for i in print2:\n print('2', i)\n<mask token>\nif print3 != []:\n for i in print3:\n print('3', i)\n<mask token>\nif print4 != []:\n for i in print4:\n print('4', reversecomplement(i))\n<mask token>\nif print5 != []:\n for i in print5:\n print('5', reversecomplement(i))\n<mask token>\nif print6 != []:\n for i in print6:\n print('6', reversecomplement(i))\n",
"step-4": "def reverse_string(seq):\n return seq[::-1]\n\n\ndef complement(seq):\n seq = seq.upper()\n basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}\n letters = list(seq)\n letters = [basecomplement[base] for base in letters]\n return ''.join(letters)\n\n\ndef reversecomplement(seq):\n seq = reverse_string(seq)\n seq = complement(seq)\n return seq\n\n\ndef DNA_To_AA(seq):\n RNA_AA_dict = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA':\n '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H', 'CGT': 'R', 'CGG': 'R',\n 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG':\n 'Q', 'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I',\n 'CAT': 'H', 'GGC': 'G', 'GGG': 'G', 'GCT': 'A', 'GAT': 'D', 'GCA':\n 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',\n 'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG':\n 'W', 'AGA': 'R', 'TTT': 'F', 'TAG': '*', 'TGC': 'C', 'GGA': 'G',\n 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V', 'CGC':\n 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E',\n 'TCT': 'S', 'ATT': 'I', 'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA':\n 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}\n F_position = 0\n R_position = 0\n Aa = ''\n for i in range(int(len(seq) / 3)):\n F_position = i * 3\n R_position = F_position + 3\n RNA_one = seq[F_position:R_position]\n Aa += RNA_AA_dict[RNA_one]\n return Aa\n\n\ndef Peptide_Encoding(DNA, AA_input):\n AA = DNA_To_AA(DNA)\n print(AA)\n l = len(AA_input)\n return_DNA = []\n find_position = 0\n while AA_input in AA[find_position:]:\n AA_position = find_position + AA[find_position:].find(AA_input)\n DNA_position = 3 * AA_position\n return_DNA.append(DNA[DNA_position:DNA_position + 3 * l])\n find_position = AA_position + 1\n return return_DNA\n\n\nDNA = ''\nfilename = input('Enter file name: ')\nfileread = open(filename, 'r')\nfor i in fileread:\n read = i.strip()\n DNA += read.upper()\nprint(DNA[:200])\nF_position = 0\nR_position = 0\nAa_input = input('what is the aa?')\nDNA_F_1 = DNA\nprint1 = Peptide_Encoding(DNA_F_1, Aa_input)\nif print1 != []:\n for i in print1:\n print('1', i)\nDNA_F_2 = DNA[1:]\nprint2 = Peptide_Encoding(DNA_F_2, Aa_input)\nif print2 != []:\n for i in print2:\n print('2', i)\nDNA_F_3 = DNA[2:]\nprint3 = Peptide_Encoding(DNA_F_3, Aa_input)\nif print3 != []:\n for i in print3:\n print('3', i)\nRC_DNA = reversecomplement(DNA)\nDNA_R_1 = RC_DNA\nprint4 = Peptide_Encoding(DNA_R_1, Aa_input)\nif print4 != []:\n for i in print4:\n print('4', reversecomplement(i))\nDNA_R_2 = RC_DNA[1:]\nprint5 = Peptide_Encoding(DNA_R_2, Aa_input)\nif print5 != []:\n for i in print5:\n print('5', reversecomplement(i))\nDNA_R_3 = RC_DNA[2:]\nprint6 = Peptide_Encoding(DNA_R_3, Aa_input)\nif print6 != []:\n for i in print6:\n print('6', reversecomplement(i))\n",
"step-5": "#Peptide Encoding Problem: Find substrings of a genome encoding a given amino acid sequence.\n# Input: A DNA string Text, an amino acid string Peptide, and the array GeneticCode.\n# Output: All substrings of Text encoding Peptide (if any such substrings exist).\ndef reverse_string(seq):\n return seq[::-1]\n\ndef complement(seq):\n#return the complementary sequence string.\n seq=seq.upper()\n basecomplement={\"A\":\"T\",\"C\":\"G\",\"G\":\"C\",\"T\":\"A\",\"N\":\"N\"}\n letters=list(seq)\n letters=[basecomplement[base] for base in letters]\n return ''.join(letters)\n\n\ndef reversecomplement(seq):\n #return the reverse complement of the dna string.\n seq=reverse_string(seq)\n seq=complement(seq)\n return seq\n\ndef DNA_To_AA(seq):\n RNA_AA_dict = {'TCC': 'S', 'TAC': 'Y', 'AGT': 'S', 'ACG': 'T', 'TAA': '*', 'TTA': 'L', 'GTC': 'V', 'CAC': 'H',\n 'CGT': 'R', 'CGG': 'R', 'CTC': 'L', 'AGG': 'R', 'ACA': 'T', 'TCA': 'S', 'CCT': 'P', 'CAG': 'Q',\n 'ACC': 'T', 'TTC': 'F', 'ATC': 'I', 'AAT': 'N', 'ATA': 'I', 'CAT': 'H', 'GGC': 'G', 'GGG': 'G',\n 'GCT': 'A', 'GAT': 'D', 'GCA': 'A', 'GCG': 'A', 'GTA': 'V', 'GAC': 'D', 'CTT': 'L', 'CAA': 'Q',\n 'CCG': 'P', 'AAG': 'K', 'GTT': 'V', 'GGT': 'G', 'TAT': 'Y', 'TGG': 'W', 'AGA': 'R', 'TTT': 'F',\n 'TAG': '*', 'TGC': 'C', 'GGA': 'G', 'CCA': 'P', 'GCC': 'A', 'CGA': 'R', 'AAA': 'K', 'GTG': 'V',\n 'CGC': 'R', 'CTG': 'L', 'TCG': 'S', 'TTG': 'L', 'GAA': 'E', 'GAG': 'E', 'TCT': 'S', 'ATT': 'I',\n 'AAC': 'N', 'ACT': 'T', 'TGT': 'C', 'CTA': 'L', 'ATG': 'M', 'CCC': 'P', 'AGC': 'S', 'TGA': '*'}\n F_position = 0\n R_position = 0\n Aa=\"\"\n for i in range(int(len(seq) / 3)):\n F_position = i*3\n R_position = F_position+3\n RNA_one=seq[F_position:R_position]\n #if RNA_one == \"TAA\" or RNA_one == \"TAG\" or RNA_one == \"TGA\":\n # break\n Aa += RNA_AA_dict[RNA_one]\n\n return Aa\n\ndef Peptide_Encoding(DNA,AA_input):\n AA= DNA_To_AA(DNA)\n print(AA)\n l=len(AA_input)\n return_DNA=[]\n find_position=0\n #print(DNA,AA,l,return_DNA,find_position)\n\n\n while AA_input in AA[find_position:]:\n #print(AA[find_position:])\n AA_position = find_position + AA[find_position:].find(AA_input)\n DNA_position = 3 * AA_position\n #print(AA_position, DNA_position)\n return_DNA.append(DNA[DNA_position:DNA_position + 3 * l])\n find_position = AA_position + 1\n #print(find_position)\n return return_DNA\n\n\n\nDNA=\"\"\nfilename = input(\"Enter file name: \")\nfileread = open(filename, \"r\")\nfor i in fileread:\n read = i.strip()\n DNA+=read.upper()\nprint(DNA[:200])\n\nF_position=0\nR_position=0\n\nAa_input=input(\"what is the aa?\")\n\n\n\nDNA_F_1=DNA\nprint1=Peptide_Encoding(DNA_F_1,Aa_input)\nif print1!=[]:\n for i in print1:\n print(\"1\",i)\n\n\nDNA_F_2=DNA[1:]\nprint2=Peptide_Encoding(DNA_F_2,Aa_input)\nif print2!=[]:\n for i in print2:\n print(\"2\",i)\n\nDNA_F_3=DNA[2:]\nprint3=Peptide_Encoding(DNA_F_3,Aa_input)\nif print3!=[]:\n for i in print3:\n print(\"3\",i)\n\nRC_DNA=reversecomplement(DNA)\n\nDNA_R_1=RC_DNA\nprint4=Peptide_Encoding(DNA_R_1,Aa_input)\nif print4!=[]:\n for i in print4:\n print(\"4\",reversecomplement(i))\n\nDNA_R_2=RC_DNA[1:]\nprint5=Peptide_Encoding(DNA_R_2,Aa_input)\nif print5!=[]:\n for i in print5:\n print(\"5\",reversecomplement(i))\n\nDNA_R_3=RC_DNA[2:]\nprint6=Peptide_Encoding(DNA_R_3,Aa_input)\nif print6!=[]:\n for i in print6:\n print(\"6\",reversecomplement(i))\n\n#print(DNA_F_1,DNA_F_2,DNA_F_3,DNA_R_1,DNA_R_2,DNA_R_3)\n#print(Aa_F_1,Aa_F_2,Aa_F_3,Aa_R_1,Aa_R_2,Aa_R_3)\n\n#根据AA序列在基因组中寻找相关序列\n\n\n\n\n#Bacillus brevis.txt\n#VKLFPWFNQY",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
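The translation step above is a codon-table lookup every three bases; a toy, self-contained version of that step and of the substring scan Peptide_Encoding performs (three codons only, values from the standard genetic code):

toy_table = {"ATG": "M", "AAA": "K", "TAG": "*"}   # tiny stand-in for the full dictionary above
dna = "ATGAAATAG"
peptide = "".join(toy_table[dna[i:i + 3]] for i in range(0, len(dna), 3))
print(peptide)          # -> MK*
print("MK" in peptide)  # the same substring scan Peptide_Encoding runs on the translation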
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with fh as f:
lines = f.readlines()
n_lines = len(lines)
for i, line in enumerate(lines):
line = line.rstrip()
if line.startswith('\tBibID & rank') and n_lines > i + 2 and lines[
i + 2].startswith(''):
bibline = re.findall('\\d+\\s-\\s', lines[i + 1])
dupeid = re.findall('\\d+', str(bibline))
duplicates.append(dupeid)
elif line.startswith('\tAdding Bib'):
line = re.findall('\\d+', str(line))
bibs.append(line)
elif line.startswith('MFHD_ID '):
line = re.findall('\\d+', str(line))
mfhds.append(line)
elif line.startswith('ITEM_ID '):
line = re.findall('\\d+', str(line))
items.append(line)
else:
continue
for row in duplicates:
ws1.append(row)
for r in range(0, len(bibs)):
ws2.cell(row=r + 1, column=1).value = bibs[r][0]
for r in range(0, len(mfhds)):
ws2.cell(row=r + 1, column=2).value = mfhds[r][0]
for r in range(0, len(items)):
ws2.cell(row=r + 1, column=3).value = items[r][0]
wb1.save(fout + '.xlsx')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fname = input('Enter input file, including extension: ')
fout = input('Enter output file, without extension: ')
fh = open(fname, 'r')
duplicates = [['Duplicate Bib ID']]
bibs = [['Bib ID']]
mfhds = [['MFHD ID']]
items = [['Item ID']]
wb1 = openpyxl.Workbook()
ws1 = wb1.active
ws1.title = 'Duplicate Bibs'
ws2 = wb1.create_sheet(index=1, title='IDs Added')
with fh as f:
lines = f.readlines()
n_lines = len(lines)
for i, line in enumerate(lines):
line = line.rstrip()
if line.startswith('\tBibID & rank') and n_lines > i + 2 and lines[
i + 2].startswith(''):
bibline = re.findall('\\d+\\s-\\s', lines[i + 1])
dupeid = re.findall('\\d+', str(bibline))
duplicates.append(dupeid)
elif line.startswith('\tAdding Bib'):
line = re.findall('\\d+', str(line))
bibs.append(line)
elif line.startswith('MFHD_ID '):
line = re.findall('\\d+', str(line))
mfhds.append(line)
elif line.startswith('ITEM_ID '):
line = re.findall('\\d+', str(line))
items.append(line)
else:
continue
for row in duplicates:
ws1.append(row)
for r in range(0, len(bibs)):
ws2.cell(row=r + 1, column=1).value = bibs[r][0]
for r in range(0, len(mfhds)):
ws2.cell(row=r + 1, column=2).value = mfhds[r][0]
for r in range(0, len(items)):
ws2.cell(row=r + 1, column=3).value = items[r][0]
wb1.save(fout + '.xlsx')
<|reserved_special_token_1|>
import re
import openpyxl
fname = input('Enter input file, including extension: ')
fout = input('Enter output file, without extension: ')
fh = open(fname, 'r')
duplicates = [['Duplicate Bib ID']]
bibs = [['Bib ID']]
mfhds = [['MFHD ID']]
items = [['Item ID']]
wb1 = openpyxl.Workbook()
ws1 = wb1.active
ws1.title = 'Duplicate Bibs'
ws2 = wb1.create_sheet(index=1, title='IDs Added')
with fh as f:
lines = f.readlines()
n_lines = len(lines)
for i, line in enumerate(lines):
line = line.rstrip()
if line.startswith('\tBibID & rank') and n_lines > i + 2 and lines[
i + 2].startswith(''):
bibline = re.findall('\\d+\\s-\\s', lines[i + 1])
dupeid = re.findall('\\d+', str(bibline))
duplicates.append(dupeid)
elif line.startswith('\tAdding Bib'):
line = re.findall('\\d+', str(line))
bibs.append(line)
elif line.startswith('MFHD_ID '):
line = re.findall('\\d+', str(line))
mfhds.append(line)
elif line.startswith('ITEM_ID '):
line = re.findall('\\d+', str(line))
items.append(line)
else:
continue
for row in duplicates:
ws1.append(row)
for r in range(0, len(bibs)):
ws2.cell(row=r + 1, column=1).value = bibs[r][0]
for r in range(0, len(mfhds)):
ws2.cell(row=r + 1, column=2).value = mfhds[r][0]
for r in range(0, len(items)):
ws2.cell(row=r + 1, column=3).value = items[r][0]
wb1.save(fout + '.xlsx')
<|reserved_special_token_1|>
#This script reads through a Voyager import log and outputs duplicate bib IDs as well as the IDs of bibs, mfhds, and items created.
#import regular expressions and openpyxl
import re
import openpyxl
# prompt for file names
fname = input("Enter input file, including extension: ")
fout = input("Enter output file, without extension: ")
fh = open(fname, "r")
# set up lists
duplicates = [["Duplicate Bib ID"]]
bibs = [["Bib ID"]]
mfhds = [["MFHD ID"]]
items = [["Item ID"]]
# create and open workbook with two sheets
wb1=openpyxl.Workbook()
ws1=wb1.active
ws1.title = "Duplicate Bibs"
ws2 = wb1.create_sheet(index=1, title="IDs Added")
# read through file, extract the line after the line starting with BibID & rank and write to lists
with fh as f:
lines = f.readlines()
n_lines = len(lines)
for i, line in enumerate (lines) :
line = line.rstrip()
if line.startswith(" BibID & rank") and \
            n_lines > i + 2 and lines[i + 2].startswith("") :  # startswith("") is always True; only the bounds check matters here
bibline = re.findall(r'\d+\s-\s', lines[i+1])
dupeid = re.findall(r'\d+', str(bibline))
duplicates.append(dupeid)
elif line.startswith(" Adding Bib") :
line = re.findall(r'\d+',str(line))
bibs.append(line)
elif line.startswith("MFHD_ID ") :
line = re.findall(r'\d+',str(line))
mfhds.append(line)
elif line.startswith("ITEM_ID ") :
line = re.findall(r'\d+',str(line))
items.append(line)
else :
continue
# write the lists to columns in the spreadsheet and save
for row in duplicates:
ws1.append(row)
for r in range(0,len(bibs)):
ws2.cell(row=r+1,column=1).value=bibs[r][0]
for r in range(0,len(mfhds)):
ws2.cell(row=r+1,column=2).value=mfhds[r][0]
for r in range(0,len(items)):
ws2.cell(row=r+1,column=3).value=items[r][0]
wb1.save(fout + ".xlsx")
|
flexible
|
{
"blob_id": "fc06d8a26a99c16a4b38ad0b4bbb28a1dc522991",
"index": 6902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate(lines):\n line = line.rstrip()\n if line.startswith('\\tBibID & rank') and n_lines > i + 2 and lines[\n i + 2].startswith(''):\n bibline = re.findall('\\\\d+\\\\s-\\\\s', lines[i + 1])\n dupeid = re.findall('\\\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith('\\tAdding Bib'):\n line = re.findall('\\\\d+', str(line))\n bibs.append(line)\n elif line.startswith('MFHD_ID '):\n line = re.findall('\\\\d+', str(line))\n mfhds.append(line)\n elif line.startswith('ITEM_ID '):\n line = re.findall('\\\\d+', str(line))\n items.append(line)\n else:\n continue\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0, len(bibs)):\n ws2.cell(row=r + 1, column=1).value = bibs[r][0]\nfor r in range(0, len(mfhds)):\n ws2.cell(row=r + 1, column=2).value = mfhds[r][0]\nfor r in range(0, len(items)):\n ws2.cell(row=r + 1, column=3).value = items[r][0]\nwb1.save(fout + '.xlsx')\n",
"step-3": "<mask token>\nfname = input('Enter input file, including extension: ')\nfout = input('Enter output file, without extension: ')\nfh = open(fname, 'r')\nduplicates = [['Duplicate Bib ID']]\nbibs = [['Bib ID']]\nmfhds = [['MFHD ID']]\nitems = [['Item ID']]\nwb1 = openpyxl.Workbook()\nws1 = wb1.active\nws1.title = 'Duplicate Bibs'\nws2 = wb1.create_sheet(index=1, title='IDs Added')\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate(lines):\n line = line.rstrip()\n if line.startswith('\\tBibID & rank') and n_lines > i + 2 and lines[\n i + 2].startswith(''):\n bibline = re.findall('\\\\d+\\\\s-\\\\s', lines[i + 1])\n dupeid = re.findall('\\\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith('\\tAdding Bib'):\n line = re.findall('\\\\d+', str(line))\n bibs.append(line)\n elif line.startswith('MFHD_ID '):\n line = re.findall('\\\\d+', str(line))\n mfhds.append(line)\n elif line.startswith('ITEM_ID '):\n line = re.findall('\\\\d+', str(line))\n items.append(line)\n else:\n continue\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0, len(bibs)):\n ws2.cell(row=r + 1, column=1).value = bibs[r][0]\nfor r in range(0, len(mfhds)):\n ws2.cell(row=r + 1, column=2).value = mfhds[r][0]\nfor r in range(0, len(items)):\n ws2.cell(row=r + 1, column=3).value = items[r][0]\nwb1.save(fout + '.xlsx')\n",
"step-4": "import re\nimport openpyxl\nfname = input('Enter input file, including extension: ')\nfout = input('Enter output file, without extension: ')\nfh = open(fname, 'r')\nduplicates = [['Duplicate Bib ID']]\nbibs = [['Bib ID']]\nmfhds = [['MFHD ID']]\nitems = [['Item ID']]\nwb1 = openpyxl.Workbook()\nws1 = wb1.active\nws1.title = 'Duplicate Bibs'\nws2 = wb1.create_sheet(index=1, title='IDs Added')\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate(lines):\n line = line.rstrip()\n if line.startswith('\\tBibID & rank') and n_lines > i + 2 and lines[\n i + 2].startswith(''):\n bibline = re.findall('\\\\d+\\\\s-\\\\s', lines[i + 1])\n dupeid = re.findall('\\\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith('\\tAdding Bib'):\n line = re.findall('\\\\d+', str(line))\n bibs.append(line)\n elif line.startswith('MFHD_ID '):\n line = re.findall('\\\\d+', str(line))\n mfhds.append(line)\n elif line.startswith('ITEM_ID '):\n line = re.findall('\\\\d+', str(line))\n items.append(line)\n else:\n continue\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0, len(bibs)):\n ws2.cell(row=r + 1, column=1).value = bibs[r][0]\nfor r in range(0, len(mfhds)):\n ws2.cell(row=r + 1, column=2).value = mfhds[r][0]\nfor r in range(0, len(items)):\n ws2.cell(row=r + 1, column=3).value = items[r][0]\nwb1.save(fout + '.xlsx')\n",
"step-5": "#This script reads through a Voyager import log and outputs duplicate bib IDs as well as the IDs of bibs, mfhds, and items created.\n\n#import regular expressions and openpyxl\nimport re\nimport openpyxl\n\n# prompt for file names\nfname = input(\"Enter input file, including extension: \")\nfout = input(\"Enter output file, without extension: \")\nfh = open(fname, \"r\")\n\n# set up lists\nduplicates = [[\"Duplicate Bib ID\"]]\nbibs = [[\"Bib ID\"]]\nmfhds = [[\"MFHD ID\"]]\nitems = [[\"Item ID\"]]\n\n# create and open workbook with two sheets\nwb1=openpyxl.Workbook()\nws1=wb1.active\nws1.title = \"Duplicate Bibs\"\nws2 = wb1.create_sheet(index=1, title=\"IDs Added\")\n\n# read through file, extract the line after the line starting with BibID & rank and write to lists\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate (lines) :\n line = line.rstrip()\n if line.startswith(\"\tBibID & rank\") and \\\n n_lines > i + 2 and lines[i + 2].startswith(\"\") :\n bibline = re.findall(r'\\d+\\s-\\s', lines[i+1])\n dupeid = re.findall(r'\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith(\"\tAdding Bib\") :\n line = re.findall(r'\\d+',str(line))\n bibs.append(line)\n elif line.startswith(\"MFHD_ID \") :\n line = re.findall(r'\\d+',str(line))\n mfhds.append(line)\n elif line.startswith(\"ITEM_ID \") :\n line = re.findall(r'\\d+',str(line))\n items.append(line)\n else :\n continue\n\n# write the lists to columns in the spreadsheet and save\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0,len(bibs)):\n ws2.cell(row=r+1,column=1).value=bibs[r][0]\nfor r in range(0,len(mfhds)):\n ws2.cell(row=r+1,column=2).value=mfhds[r][0]\nfor r in range(0,len(items)):\n ws2.cell(row=r+1,column=3).value=items[r][0]\nwb1.save(fout + \".xlsx\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from template.db import Database
from template.query import Query
import os
'''
READ ME!!
Before using this demo, be sure that the Tail_Const is set to a value high enough
to guarantee that all updates are contained within the same block.
config.py -> TAIL_CONST = 4
This program is meant to run sequentially through all parts starting with an empty ECS165
directory.
'''
db = Database()
db.open("ECS165")
print(db)
g_table = db.get_table('Grades')
q = Query(g_table)
print("Merge Start")
q.table.merge(0)
print("Merge End")
db.close()
|
normal
|
{
"blob_id": "8f5b7711d913c7375d6816dd94731f1ce5ca1a62",
"index": 8289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.open('ECS165')\nprint(db)\n<mask token>\nprint('Merge Start')\nq.table.merge(0)\nprint('Merge End')\ndb.close()\n",
"step-3": "<mask token>\ndb = Database()\ndb.open('ECS165')\nprint(db)\ng_table = db.get_table('Grades')\nq = Query(g_table)\nprint('Merge Start')\nq.table.merge(0)\nprint('Merge End')\ndb.close()\n",
"step-4": "from template.db import Database\nfrom template.query import Query\nimport os\n<mask token>\ndb = Database()\ndb.open('ECS165')\nprint(db)\ng_table = db.get_table('Grades')\nq = Query(g_table)\nprint('Merge Start')\nq.table.merge(0)\nprint('Merge End')\ndb.close()\n",
"step-5": "from template.db import Database\r\nfrom template.query import Query\r\nimport os\r\n\r\n'''\r\nREAD ME!!\r\n Before using this demo, be sure that the Tail_Const is set to a value high enough\r\n to guaranteed that all updates are contained within the same block.\r\n config.py -> TAIL_CONST = 4\r\n\r\n This program is meant to run sequentially through all parts starting with an empty ECS165\r\n directory.\r\n'''\r\ndb = Database()\r\ndb.open(\"ECS165\")\r\nprint(db)\r\ng_table = db.get_table('Grades')\r\nq = Query(g_table)\r\n\r\nprint(\"Merge Start\")\r\nq.table.merge(0)\r\nprint(\"Merge End\")\r\n\r\ndb.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for x in array:
print(x)
<|reserved_special_token_1|>
array = [1, 2, 3, 4, 5]
for x in array:
print(x)
|
flexible
|
{
"blob_id": "224e13331ad93278f47a5582bbd24208d9ce5dcc",
"index": 3705,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in array:\n print(x)\n",
"step-3": "array = [1, 2, 3, 4, 5]\nfor x in array:\n print(x)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from .models import CNNClassifier, load_weights, LastLayer_Alexnet, classes, MyResNet
from .transforms import image_transforms, tensor_transform
from .utils import newest_model, Dataset, load_data
|
normal
|
{
"blob_id": "17781ae5e9c72232fbc11c7eda7daeaeb0fa3670",
"index": 9277,
"step-1": "<mask token>\n",
"step-2": "from .models import CNNClassifier, load_weights, LastLayer_Alexnet, classes, MyResNet\nfrom .transforms import image_transforms, tensor_transform\nfrom .utils import newest_model, Dataset, load_data\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_host(environ, use_x_forwarded_for=False):
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('HTTP_HOST'):
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('SERVER_NAME'):
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((
'https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
else:
rv = 'unknown'
return rv
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_host(environ, use_x_forwarded_for=False):
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('HTTP_HOST'):
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('SERVER_NAME'):
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((
'https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
else:
rv = 'unknown'
return rv
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from sentry_sdk._compat import iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_host(environ, use_x_forwarded_for=False):
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('HTTP_HOST'):
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('SERVER_NAME'):
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((
'https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
else:
rv = 'unknown'
return rv
<|reserved_special_token_1|>
"""
Copyright (c) 2007 by the Pallets team.
Some rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
from sentry_sdk._compat import iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
#
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361
#
# We need this function because Django does not give us a "pure" http header
# dict. So we might as well use it for all WSGI integrations.
#
def _get_headers(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield key[5:].replace("_", "-").title(), value
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
yield key.replace("_", "-").title(), value
#
# `get_host` comes from `werkzeug.wsgi.get_host`
# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145
#
def get_host(environ, use_x_forwarded_for=False):
# type: (Dict[str, str], bool) -> str
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and "HTTP_X_FORWARDED_HOST" in environ:
rv = environ["HTTP_X_FORWARDED_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv
|
flexible
|
{
"blob_id": "53cd9d5a79e97bb1af69446a82c747248c3cc298",
"index": 1367,
"step-1": "<mask token>\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-4": "<mask token>\nfrom sentry_sdk._compat import iteritems\nfrom sentry_sdk._types import TYPE_CHECKING\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-5": "\"\"\"\nCopyright (c) 2007 by the Pallets team.\n\nSome rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND\nCONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,\nBUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\nCOPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\nNOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\nUSE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\nTHIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGE.\n\"\"\"\n\nfrom sentry_sdk._compat import iteritems\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\n#\n# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`\n# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361\n#\n# We need this function because Django does not give us a \"pure\" http header\n# dict. 
So we might as well use it for all WSGI integrations.\n#\ndef _get_headers(environ):\n # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith(\"HTTP_\") and key not in (\n \"HTTP_CONTENT_TYPE\",\n \"HTTP_CONTENT_LENGTH\",\n ):\n yield key[5:].replace(\"_\", \"-\").title(), value\n elif key in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n yield key.replace(\"_\", \"-\").title(), value\n\n\n#\n# `get_host` comes from `werkzeug.wsgi.get_host`\n# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145\n#\ndef get_host(environ, use_x_forwarded_for=False):\n # type: (Dict[str, str], bool) -> str\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and \"HTTP_X_FORWARDED_HOST\" in environ:\n rv = environ[\"HTTP_X_FORWARDED_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"HTTP_HOST\"):\n rv = environ[\"HTTP_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"SERVER_NAME\"):\n rv = environ[\"SERVER_NAME\"]\n if (environ[\"wsgi.url_scheme\"], environ[\"SERVER_PORT\"]) not in (\n (\"https\", \"443\"),\n (\"http\", \"80\"),\n ):\n rv += \":\" + environ[\"SERVER_PORT\"]\n else:\n # In spite of the WSGI spec, SERVER_NAME might not be present.\n rv = \"unknown\"\n\n return rv\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TcpTestProtocol(asyncio.Protocol):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def socket_id(self):
"""Return socket id"""
return self._sock_id
def set_owner(self, owner, is_stream=False):
"""Update owner to test from server once ready"""
if is_stream:
self._logger.debug('TCP Proto Stream is set!')
self._stream = owner
else:
self._server = owner
def connection_made(self, transport):
"""Connection established call-back"""
self._transport = transport
self._socket = transport.get_extra_info('socket')
self._sock_id = self._socket.fileno()
if self._server is None:
self.connection_to_server_made(transport)
else:
self.connection_from_client(transport)
def connection_from_client(self, transport):
"""Connection from the client established to the server"""
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] incomming connection from %s port %s', self
._sock_id, peer_data[0], peer_data[1])
self._server.tcp_connection_established(self)
def connection_to_server_made(self, transport):
"""Connecton to the server established"""
local_data = self._socket.getsockname()
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] local %s:%s connected to %s:%s', self.
_sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)
if self._no_delay:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._window:
self._logger.debug('Setting socket buffer sizes to %s B', self.
_window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
self._window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
self._window)
rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',
tx_buf, rx_buf)
self._stream.connection_established(self)
<|reserved_special_token_0|>
def connection_lost(self, exc):
"""
Callback on connection lost.
"""
if self._stream.done:
pass
else:
self._logger.debug('[%s] Connection lost!', self._sock_id,
exc_info=exc)
def send_data(self, data):
"""
Write data to transport.
"""
self._transport.write(data)
def pause_writing(self):
"""
Pause writing callback from transport.
"""
self._stream.pause_writing()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TcpTestProtocol(asyncio.Protocol):
<|reserved_special_token_0|>
def __init__(self, test_stream=None, no_delay=False, window=None,
server=None):
"""
Initialize TCP Protocol object.
"""
self._transport = None
self._socket = None
self._stream = test_stream
self._logger = logging.getLogger('py3iperf3')
self._sock_id = None
self._no_delay = no_delay
self._window = window
self._server = server
@property
def socket_id(self):
"""Return socket id"""
return self._sock_id
def set_owner(self, owner, is_stream=False):
"""Update owner to test from server once ready"""
if is_stream:
self._logger.debug('TCP Proto Stream is set!')
self._stream = owner
else:
self._server = owner
def connection_made(self, transport):
"""Connection established call-back"""
self._transport = transport
self._socket = transport.get_extra_info('socket')
self._sock_id = self._socket.fileno()
if self._server is None:
self.connection_to_server_made(transport)
else:
self.connection_from_client(transport)
def connection_from_client(self, transport):
"""Connection from the client established to the server"""
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] incomming connection from %s port %s', self
._sock_id, peer_data[0], peer_data[1])
self._server.tcp_connection_established(self)
def connection_to_server_made(self, transport):
"""Connecton to the server established"""
local_data = self._socket.getsockname()
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] local %s:%s connected to %s:%s', self.
_sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)
if self._no_delay:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._window:
self._logger.debug('Setting socket buffer sizes to %s B', self.
_window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
self._window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
self._window)
rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',
tx_buf, rx_buf)
self._stream.connection_established(self)
<|reserved_special_token_0|>
def connection_lost(self, exc):
"""
Callback on connection lost.
"""
if self._stream.done:
pass
else:
self._logger.debug('[%s] Connection lost!', self._sock_id,
exc_info=exc)
def send_data(self, data):
"""
Write data to transport.
"""
self._transport.write(data)
def pause_writing(self):
"""
Pause writing callback from transport.
"""
self._stream.pause_writing()
def resume_writing(self):
"""
Resume writing callback from transport.
"""
self._stream.resume_writing()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TcpTestProtocol(asyncio.Protocol):
<|reserved_special_token_0|>
def __init__(self, test_stream=None, no_delay=False, window=None,
server=None):
"""
Initialize TCP Protocol object.
"""
self._transport = None
self._socket = None
self._stream = test_stream
self._logger = logging.getLogger('py3iperf3')
self._sock_id = None
self._no_delay = no_delay
self._window = window
self._server = server
@property
def socket_id(self):
"""Return socket id"""
return self._sock_id
def set_owner(self, owner, is_stream=False):
"""Update owner to test from server once ready"""
if is_stream:
self._logger.debug('TCP Proto Stream is set!')
self._stream = owner
else:
self._server = owner
def connection_made(self, transport):
"""Connection established call-back"""
self._transport = transport
self._socket = transport.get_extra_info('socket')
self._sock_id = self._socket.fileno()
if self._server is None:
self.connection_to_server_made(transport)
else:
self.connection_from_client(transport)
def connection_from_client(self, transport):
"""Connection from the client established to the server"""
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] incomming connection from %s port %s', self
._sock_id, peer_data[0], peer_data[1])
self._server.tcp_connection_established(self)
def connection_to_server_made(self, transport):
"""Connecton to the server established"""
local_data = self._socket.getsockname()
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] local %s:%s connected to %s:%s', self.
_sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)
if self._no_delay:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._window:
self._logger.debug('Setting socket buffer sizes to %s B', self.
_window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
self._window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
self._window)
rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',
tx_buf, rx_buf)
self._stream.connection_established(self)
def data_received(self, data):
"""
Data received call-back.
"""
if self._stream is None:
self._server.control_data_received(self, data)
else:
self._stream.data_received(data)
def connection_lost(self, exc):
"""
Callback on connection lost.
"""
if self._stream.done:
pass
else:
self._logger.debug('[%s] Connection lost!', self._sock_id,
exc_info=exc)
def send_data(self, data):
"""
Write data to transport.
"""
self._transport.write(data)
def pause_writing(self):
"""
Pause writing callback from transport.
"""
self._stream.pause_writing()
def resume_writing(self):
"""
Resume writing callback from transport.
"""
self._stream.resume_writing()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TcpTestProtocol(asyncio.Protocol):
"""
Extension of asyncio protocol for TCP data
"""
def __init__(self, test_stream=None, no_delay=False, window=None,
server=None):
"""
Initialize TCP Protocol object.
"""
self._transport = None
self._socket = None
self._stream = test_stream
self._logger = logging.getLogger('py3iperf3')
self._sock_id = None
self._no_delay = no_delay
self._window = window
self._server = server
@property
def socket_id(self):
"""Return socket id"""
return self._sock_id
def set_owner(self, owner, is_stream=False):
"""Update owner to test from server once ready"""
if is_stream:
self._logger.debug('TCP Proto Stream is set!')
self._stream = owner
else:
self._server = owner
def connection_made(self, transport):
"""Connection established call-back"""
self._transport = transport
self._socket = transport.get_extra_info('socket')
self._sock_id = self._socket.fileno()
if self._server is None:
self.connection_to_server_made(transport)
else:
self.connection_from_client(transport)
def connection_from_client(self, transport):
"""Connection from the client established to the server"""
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] incomming connection from %s port %s', self
._sock_id, peer_data[0], peer_data[1])
self._server.tcp_connection_established(self)
def connection_to_server_made(self, transport):
"""Connecton to the server established"""
local_data = self._socket.getsockname()
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] local %s:%s connected to %s:%s', self.
_sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)
if self._no_delay:
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._window:
self._logger.debug('Setting socket buffer sizes to %s B', self.
_window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
self._window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,
self._window)
rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',
tx_buf, rx_buf)
self._stream.connection_established(self)
def data_received(self, data):
"""
Data received call-back.
"""
if self._stream is None:
self._server.control_data_received(self, data)
else:
self._stream.data_received(data)
def connection_lost(self, exc):
"""
Callback on connection lost.
"""
if self._stream.done:
pass
else:
self._logger.debug('[%s] Connection lost!', self._sock_id,
exc_info=exc)
def send_data(self, data):
"""
Write data to transport.
"""
self._transport.write(data)
def pause_writing(self):
"""
Pause writing callback from transport.
"""
self._stream.pause_writing()
def resume_writing(self):
"""
Resume writing callback from transport.
"""
self._stream.resume_writing()
<|reserved_special_token_1|>
"""
Python asyncio Protocol extension for TCP use.
"""
import asyncio
import logging
import socket
class TcpTestProtocol(asyncio.Protocol):
"""
Extension of asyncio protocol for TCP data
"""
def __init__(self, test_stream=None, no_delay=False, window=None, server=None):
"""
Initialize TCP Protocol object.
"""
self._transport = None
self._socket = None
self._stream = test_stream
self._logger = logging.getLogger('py3iperf3')
self._sock_id = None
self._no_delay = no_delay
self._window = window
self._server = server
@property
def socket_id(self):
"""Return socket id"""
return self._sock_id
def set_owner(self, owner, is_stream=False):
"""Update owner to test from server once ready"""
if is_stream:
self._logger.debug('TCP Proto Stream is set!')
self._stream = owner
else:
self._server = owner
def connection_made(self, transport):
"""Connection established call-back"""
self._transport = transport
self._socket = transport.get_extra_info('socket')
self._sock_id = self._socket.fileno()
if self._server is None:
# This is client connecting to the server
self.connection_to_server_made(transport)
else:
# This is incomming connection from the client
self.connection_from_client(transport)
def connection_from_client(self, transport):
"""Connection from the client established to the server"""
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] incomming connection from %s port %s',
self._sock_id, peer_data[0], peer_data[1])
self._server.tcp_connection_established(self)
def connection_to_server_made(self, transport):
"""Connecton to the server established"""
local_data = self._socket.getsockname()
peer_data = transport.get_extra_info('peername')
self._logger.info('[%s] local %s:%s connected to %s:%s',
self._sock_id, local_data[0], local_data[1],
peer_data[0], peer_data[1])
# No delay OFF -> Nagle's alg used
self._socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_NODELAY,
0)
# If required - turn off Nagle's alg (No Delay ON)
if self._no_delay:
self._socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
# Set Socket TX/RX buffer sizes if specified
if self._window:
self._logger.debug('Setting socket buffer sizes to %s B', self._window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self._window)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self._window)
# Print current buf sizes:
rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;', tx_buf, rx_buf)
self._stream.connection_established(self)
def data_received(self, data):
"""
Data received call-back.
"""
# Inform the server that we have data until the stream is ready
if self._stream is None:
self._server.control_data_received(self, data)
else:
self._stream.data_received(data)
def connection_lost(self, exc):
"""
Callback on connection lost.
"""
if self._stream.done:
# Stream is done, no need to panic
pass
else:
self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)
def send_data(self, data):
"""
Write data to transport.
"""
self._transport.write(data)
def pause_writing(self):
"""
Pause writing callback from transport.
"""
self._stream.pause_writing()
def resume_writing(self):
"""
Resume writing callback from transport.
"""
self._stream.resume_writing()
|
flexible
|
{
"blob_id": "9f0e286268732e8cabb028b7c84f5ba72a6e8528",
"index": 3068,
"step-1": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n <mask token>\n <mask token>\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n <mask token>\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n <mask token>\n\n def __init__(self, test_stream=None, no_delay=False, window=None,\n server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n <mask token>\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()\n",
"step-3": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n <mask token>\n\n def __init__(self, test_stream=None, no_delay=False, window=None,\n server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n\n def data_received(self, data):\n \"\"\"\n Data received call-back.\n \"\"\"\n if self._stream is None:\n self._server.control_data_received(self, data)\n else:\n self._stream.data_received(data)\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()\n",
"step-4": "<mask token>\n\n\nclass TcpTestProtocol(asyncio.Protocol):\n \"\"\"\n Extension of asyncio protocol for TCP data\n \"\"\"\n\n def __init__(self, test_stream=None, no_delay=False, window=None,\n server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n if self._server is None:\n self.connection_to_server_made(transport)\n else:\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s', self\n ._sock_id, peer_data[0], peer_data[1])\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] local %s:%s connected to %s:%s', self.\n _sock_id, local_data[0], local_data[1], peer_data[0], peer_data[1])\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n if self._no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self.\n _window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,\n self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF,\n self._window)\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;',\n tx_buf, rx_buf)\n self._stream.connection_established(self)\n\n def data_received(self, data):\n \"\"\"\n Data received call-back.\n \"\"\"\n if self._stream is None:\n self._server.control_data_received(self, data)\n else:\n self._stream.data_received(data)\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id,\n exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n \"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()\n",
"step-5": "\"\"\"\nPython asyncio Protocol extension for TCP use.\n\"\"\"\nimport asyncio\nimport logging\nimport socket\n\nclass TcpTestProtocol(asyncio.Protocol):\n \"\"\"\n Extension of asyncio protocol for TCP data\n \"\"\"\n\n def __init__(self, test_stream=None, no_delay=False, window=None, server=None):\n \"\"\"\n Initialize TCP Protocol object.\n \"\"\"\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server\n\n @property\n def socket_id(self):\n \"\"\"Return socket id\"\"\"\n return self._sock_id\n\n def set_owner(self, owner, is_stream=False):\n \"\"\"Update owner to test from server once ready\"\"\"\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner\n\n def connection_made(self, transport):\n \"\"\"Connection established call-back\"\"\"\n\n self._transport = transport\n self._socket = transport.get_extra_info('socket')\n self._sock_id = self._socket.fileno()\n\n if self._server is None:\n # This is client connecting to the server\n self.connection_to_server_made(transport)\n else:\n # This is incomming connection from the client\n self.connection_from_client(transport)\n\n def connection_from_client(self, transport):\n \"\"\"Connection from the client established to the server\"\"\"\n peer_data = transport.get_extra_info('peername')\n self._logger.info('[%s] incomming connection from %s port %s',\n self._sock_id, peer_data[0], peer_data[1])\n\n self._server.tcp_connection_established(self)\n\n def connection_to_server_made(self, transport):\n \"\"\"Connecton to the server established\"\"\"\n\n local_data = self._socket.getsockname()\n peer_data = transport.get_extra_info('peername')\n\n self._logger.info('[%s] local %s:%s connected to %s:%s',\n self._sock_id, local_data[0], local_data[1],\n peer_data[0], peer_data[1])\n\n # No delay OFF -> Nagle's alg used\n self._socket.setsockopt(\n socket.IPPROTO_TCP,\n socket.TCP_NODELAY,\n 0)\n\n # If required - turn off Nagle's alg (No Delay ON)\n if self._no_delay:\n self._socket.setsockopt(\n socket.IPPROTO_TCP,\n socket.TCP_NODELAY,\n 1)\n\n # Set Socket TX/RX buffer sizes if specified\n if self._window:\n self._logger.debug('Setting socket buffer sizes to %s B', self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self._window)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self._window)\n\n # Print current buf sizes:\n rx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n tx_buf = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)\n\n self._logger.debug('Socket TX buffer: %s B; RX buffer: %s B;', tx_buf, rx_buf)\n\n self._stream.connection_established(self)\n\n def data_received(self, data):\n \"\"\"\n Data received call-back.\n \"\"\"\n # Inform the server that we have data until the stream is ready\n if self._stream is None:\n self._server.control_data_received(self, data)\n else:\n self._stream.data_received(data)\n\n def connection_lost(self, exc):\n \"\"\"\n Callback on connection lost.\n \"\"\"\n if self._stream.done:\n # Stream is done, no need to panic\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)\n\n def send_data(self, data):\n \"\"\"\n Write data to transport.\n \"\"\"\n self._transport.write(data)\n\n def pause_writing(self):\n \"\"\"\n Pause writing callback from transport.\n 
\"\"\"\n self._stream.pause_writing()\n\n def resume_writing(self):\n \"\"\"\n Resume writing callback from transport.\n \"\"\"\n self._stream.resume_writing()",
"step-ids": [
9,
11,
12,
13,
15
]
}
|
[
9,
11,
12,
13,
15
] |
from sonosscripts import common
from sonosscripts.common import round_nearest
def run(_):
parser = common.get_argument_parser()
parser.add_argument("--step", help="volume step", type=int, default=5)
parsed_args = parser.parse_args()
sonos = common.get_sonos(parsed_args)
step = parsed_args.step
current_volume = sonos.volume
new_volume = current_volume + step
new_volume = round_nearest(new_volume, step)
new_volume = min(common.max_volume, new_volume)
new_volume = max(common.min_volume, new_volume)
sonos.volume = new_volume
if new_volume != current_volume:
common.send_notification(f"Changed volume from {current_volume} to {new_volume}", common.get_icon(new_volume),
common.process_volume)
else:
if new_volume == common.max_volume:
common.send_notification("Volume is already at maximum", common.get_icon(new_volume), common.process_volume)
else:
common.send_notification("Volume is already at minimum", common.get_icon(new_volume), common.process_volume)
|
normal
|
{
"blob_id": "6e78dee46276f738197ba6796fe1a027ab743354",
"index": 1769,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(_):\n parser = common.get_argument_parser()\n parser.add_argument('--step', help='volume step', type=int, default=5)\n parsed_args = parser.parse_args()\n sonos = common.get_sonos(parsed_args)\n step = parsed_args.step\n current_volume = sonos.volume\n new_volume = current_volume + step\n new_volume = round_nearest(new_volume, step)\n new_volume = min(common.max_volume, new_volume)\n new_volume = max(common.min_volume, new_volume)\n sonos.volume = new_volume\n if new_volume != current_volume:\n common.send_notification(\n f'Changed volume from {current_volume} to {new_volume}', common\n .get_icon(new_volume), common.process_volume)\n elif new_volume == common.max_volume:\n common.send_notification('Volume is already at maximum', common.\n get_icon(new_volume), common.process_volume)\n else:\n common.send_notification('Volume is already at minimum', common.\n get_icon(new_volume), common.process_volume)\n",
"step-3": "from sonosscripts import common\nfrom sonosscripts.common import round_nearest\n\n\ndef run(_):\n parser = common.get_argument_parser()\n parser.add_argument('--step', help='volume step', type=int, default=5)\n parsed_args = parser.parse_args()\n sonos = common.get_sonos(parsed_args)\n step = parsed_args.step\n current_volume = sonos.volume\n new_volume = current_volume + step\n new_volume = round_nearest(new_volume, step)\n new_volume = min(common.max_volume, new_volume)\n new_volume = max(common.min_volume, new_volume)\n sonos.volume = new_volume\n if new_volume != current_volume:\n common.send_notification(\n f'Changed volume from {current_volume} to {new_volume}', common\n .get_icon(new_volume), common.process_volume)\n elif new_volume == common.max_volume:\n common.send_notification('Volume is already at maximum', common.\n get_icon(new_volume), common.process_volume)\n else:\n common.send_notification('Volume is already at minimum', common.\n get_icon(new_volume), common.process_volume)\n",
"step-4": "from sonosscripts import common\nfrom sonosscripts.common import round_nearest\n\n\ndef run(_):\n parser = common.get_argument_parser()\n parser.add_argument(\"--step\", help=\"volume step\", type=int, default=5)\n parsed_args = parser.parse_args()\n sonos = common.get_sonos(parsed_args)\n step = parsed_args.step\n current_volume = sonos.volume\n new_volume = current_volume + step\n new_volume = round_nearest(new_volume, step)\n new_volume = min(common.max_volume, new_volume)\n new_volume = max(common.min_volume, new_volume)\n sonos.volume = new_volume\n if new_volume != current_volume:\n common.send_notification(f\"Changed volume from {current_volume} to {new_volume}\", common.get_icon(new_volume),\n common.process_volume)\n else:\n if new_volume == common.max_volume:\n common.send_notification(\"Volume is already at maximum\", common.get_icon(new_volume), common.process_volume)\n else:\n common.send_notification(\"Volume is already at minimum\", common.get_icon(new_volume), common.process_volume)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__author__ = 'samar'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__author__ = 'samar'
import mv_details
import product
|
flexible
|
{
"blob_id": "7ac53779a98b6e4b236b1e81742163d2c610a274",
"index": 4556,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'samar'\n<mask token>\n",
"step-3": "__author__ = 'samar'\nimport mv_details\nimport product\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import requests,cv2,numpy,time,imutils
class imageAnalyzer():
def __init__(self,
roverName="Rover03",
url="http://192.168.1.10:5000/api/",
temp_img_path = "./temp",
):
self.url = url + roverName
self.temp_img_path = temp_img_path
def getImage(self,img_number): # gets image from camera and saves it as temp(img_number).jpeg
temp = open(self.temp_img_path + str(img_number) + ".jpeg", "wb")
img = requests.get(self.url + "/image")
temp.write(img.content)
temp.close()
def analyzeHSV(self,img_number,thresholds=(numpy.array([20,100,110]),numpy.array([40,255,255]))): # min, max, creates mask from HSV thresholds
img = cv2.imread(self.temp_img_path + str(img_number) + ".jpeg")
orig = numpy.copy(img)
try:
img = cv2.GaussianBlur(img,(7,7),8)
except:
pass
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
ret = cv2.inRange(hsv, thresholds[0],thresholds[1])
return ret,orig
def findBoundingBoxes(self,img,orig=None,area_thresh=100,aspect_thresh=[0.8,1.0],y_threshold=[0,0.6]): # finds contours from mask and determines bound boxes, vetoes by minimum box area, aspect ratio and vertical screen portion
con = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
con = imutils.grab_contours(con)
if orig.any():
cv2.drawContours(orig, con, -1, (255, 255, 255),thickness=2)
bound = []
for c in con:
bound.append(cv2.boundingRect(c))
bound = list(filter(lambda x: (x[2]*x[3] >= area_thresh) and (aspect_thresh[0] <= x[3]/x[2] <= aspect_thresh[1]) and 480*y_threshold[0] <= 480-x[1] <= 480*y_threshold[1], bound)) # vetoing based on minimal bounding box area, relative position in image and aspect ratio
for b in bound:
cv2.rectangle(orig,b,color=(0,0,255),thickness=2)
cv2.imwrite("vis{}.jpg".format(0),orig)
return bound
def approx_distance(self,duckie_boxes,dist_half_screen=5,camera_y_res=480): # bounding boxes of ducks, calibration: distance in cm from camera to center of duck for duck to take up half of camera image height assuming duck size = const.
distances = [ (box, round(dist_half_screen*(1/2)*(camera_y_res/box[3]) ) ) for box in duckie_boxes] # NOTE: Y coordinate origin is from the top of the image, returns list of (rect=(x_anchor,y_anchor,x_size,y_size),distance) tuple-value pairs (note,y_size goes downwards!)
return distances
def capture(self,temp_image=0,db_file="temp_duck_boxes.txt"): # gets image, returns bounding boxes and distances according to NOTE, creates temp images temp(n) and vis(n) with n = temp_image argument as well as distance text file
self.getImage(temp_image)
ret = self.analyzeHSV(temp_image)
boxes = self.findBoundingBoxes(ret[0], ret[1])
duck_box_file = open(db_file, "w")
        dist = self.approx_distance(boxes)  # use the instance method rather than the module-level 'analyzer' global
duck_box_file.write(str(dist))
duck_box_file.close()
return boxes, dist
analyzer = imageAnalyzer()
while True:
boxes, dist = analyzer.capture()
time.sleep(0.5)
|
normal
|
{
"blob_id": "7d3264e9a90ebd72439f77983cbf4f9755048a85",
"index": 4300,
"step-1": "<mask token>\n\n\nclass imageAnalyzer:\n <mask token>\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\n<mask token>\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-3": "<mask token>\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\nanalyzer = imageAnalyzer()\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-4": "import requests, cv2, numpy, time, imutils\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\nanalyzer = imageAnalyzer()\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-5": "import requests,cv2,numpy,time,imutils\r\n\r\nclass imageAnalyzer():\r\n\r\n def __init__(self,\r\n roverName=\"Rover03\",\r\n url=\"http://192.168.1.10:5000/api/\",\r\n temp_img_path = \"./temp\",\r\n ):\r\n\r\n self.url = url + roverName\r\n\r\n self.temp_img_path = temp_img_path\r\n\r\n def getImage(self,img_number): # gets image from camera and saves it as temp(img_number).jpeg\r\n\r\n temp = open(self.temp_img_path + str(img_number) + \".jpeg\", \"wb\")\r\n\r\n img = requests.get(self.url + \"/image\")\r\n\r\n temp.write(img.content)\r\n\r\n temp.close()\r\n\r\n def analyzeHSV(self,img_number,thresholds=(numpy.array([20,100,110]),numpy.array([40,255,255]))): # min, max, creates mask from HSV thresholds\r\n\r\n img = cv2.imread(self.temp_img_path + str(img_number) + \".jpeg\")\r\n\r\n orig = numpy.copy(img)\r\n\r\n try:\r\n\r\n img = cv2.GaussianBlur(img,(7,7),8)\r\n\r\n except:\r\n\r\n pass\r\n\r\n hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n ret = cv2.inRange(hsv, thresholds[0],thresholds[1])\r\n\r\n return ret,orig\r\n\r\n def findBoundingBoxes(self,img,orig=None,area_thresh=100,aspect_thresh=[0.8,1.0],y_threshold=[0,0.6]): # finds contours from mask and determines bound boxes, vetoes by minimum box area, aspect ratio and vertical screen portion\r\n\r\n con = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n con = imutils.grab_contours(con)\r\n\r\n if orig.any():\r\n\r\n cv2.drawContours(orig, con, -1, (255, 255, 255),thickness=2)\r\n\r\n bound = []\r\n\r\n for c in con:\r\n\r\n bound.append(cv2.boundingRect(c))\r\n\r\n bound = list(filter(lambda x: (x[2]*x[3] >= area_thresh) and (aspect_thresh[0] <= x[3]/x[2] <= aspect_thresh[1]) and 480*y_threshold[0] <= 480-x[1] <= 480*y_threshold[1], bound)) # vetoing based on minimal bounding box area, relative position in image and aspect ratio\r\n\r\n for b in bound:\r\n\r\n cv2.rectangle(orig,b,color=(0,0,255),thickness=2)\r\n\r\n cv2.imwrite(\"vis{}.jpg\".format(0),orig)\r\n\r\n return bound\r\n\r\n def approx_distance(self,duckie_boxes,dist_half_screen=5,camera_y_res=480): # bounding boxes of ducks, calibration: distance in cm from camera to center of duck for duck to take up half of camera image height assuming duck size = const.\r\n\r\n distances = {}\r\n\r\n print(duckie_boxes)\r\n\r\n for box in duckie_boxes:\r\n\r\n distances[box] = round(dist_half_screen*(1/2)*(camera_y_res/box[3]))\r\n\r\n distances = [ (box, round(dist_half_screen*(1/2)*(camera_y_res/box[3]) ) ) for box in duckie_boxes] # NOTE: Y coordinate origin is from the top of the image, returns list of (rect=(x_anchor,y_anchor,x_size,y_size),distance) tuple-value pairs (note,y_size goes downwards!)\r\n\r\n return distances\r\n\r\n def capture(self,temp_image=0,db_file=\"temp_duck_boxes.txt\"): # gets image, returns bounding boxes and distances according to NOTE, creates temp images temp(n) and vis(n) with n = temp_image argument as well as distance text file\r\n\r\n self.getImage(temp_image)\r\n\r\n ret = self.analyzeHSV(temp_image)\r\n\r\n boxes = self.findBoundingBoxes(ret[0], ret[1])\r\n\r\n duck_box_file = open(db_file, \"w\")\r\n\r\n dist = analyzer.approx_distance(boxes)\r\n\r\n duck_box_file.write(str(dist))\r\n\r\n duck_box_file.close()\r\n\r\n return boxes, dist\r\n\r\n\r\nanalyzer = imageAnalyzer()\r\n\r\nwhile True:\r\n\r\n boxes, dist = analyzer.capture()\r\n\r\n time.sleep(0.5)\r\n\r\n\r\n\r\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_get_entity_sizes():
bytes_per_voxel = 1
R = 10, 9, 10
cs = 5, 3, 2
partition = 2, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5 * 3 * 2
assert brs == 5 * 3 * 2 * 5
assert bss == 5 * 3 * 2 * 5 * 3
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel
test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *
3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,
(5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *
5 * 3 * 7): 1}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,
cs, bs, brs, bss, partition, R, bytes_per_voxel)
nb_buffers = len(buffers.values())
assert nb_buffers == expected
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_get_entity_sizes():
bytes_per_voxel = 1
R = 10, 9, 10
cs = 5, 3, 2
partition = 2, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5 * 3 * 2
assert brs == 5 * 3 * 2 * 5
assert bss == 5 * 3 * 2 * 5 * 3
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel
test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *
3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,
(5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *
5 * 3 * 7): 1}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,
cs, bs, brs, bss, partition, R, bytes_per_voxel)
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = 20, 9, 10
cs = 5, 3, 2
ff = 'HDF5'
outdir_path = './outdir'
test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,
5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *
3 * 2 * 5 * 3 * 7]
nb_chunks = 4 * 3 * 5
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob('*.hdf5'):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
<|reserved_special_token_1|>
import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
bytes_per_voxel = 1
R = 10, 9, 10
cs = 5, 3, 2
partition = 2, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5 * 3 * 2
assert brs == 5 * 3 * 2 * 5
assert bss == 5 * 3 * 2 * 5 * 3
def test_get_strategy():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *
2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):
2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
bytes_per_voxel = 1
R = 20, 9, 10
cs = 5, 3, 2
partition = 4, 3, 5
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel
test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *
3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,
(5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *
5 * 3 * 7): 1}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,
cs, bs, brs, bss, partition, R, bytes_per_voxel)
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = 20, 9, 10
cs = 5, 3, 2
ff = 'HDF5'
outdir_path = './outdir'
test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,
5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *
3 * 2 * 5 * 3 * 7]
nb_chunks = 4 * 3 * 5
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob('*.hdf5'):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
<|reserved_special_token_1|>
import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
# in C order
bytes_per_voxel = 1
R = (10,9,10)
cs = (5,3,2)
partition = (2,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5*3*2
assert brs == 5*3*2*5
assert bss == 5*3*2*5*3
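
# Editor's note (inferred from the asserts; get_entity_sizes itself is imported):
# a block is one chunk: bs = 5*3*2 = 30 bytes at 1 byte/voxel; a block row
# stacks partition[2] = 5 blocks: brs = 30*5 = 150; a block slice stacks
# partition[1] = 3 rows: bss = 150*3 = 450.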
def test_get_strategy():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {
5*2*3: 0, # 1 block
5*2*3*4: 0, # 4 blocks
5*2*3*5: 1, # 1 row
5*2*3*5*2: 1, # 2 rows
5*2*3*5*3: 2, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 2, # whole img
5*2*3*5*3*7: 2, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel
test_case = {
5*2*3: 4*3*5, # 1 block
5*2*3*4: 4*3*2, # 4 blocks
5*2*3*5: 4*3, # 1 row
5*2*3*5*2: 4*2, # 2 rows
5*2*3*5*3: 4, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 1, # whole img
5*2*3*5*3*7: 1, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)
# test number of buffers
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = (20,9,10)
cs = (5,3,2)
ff = 'HDF5'
outdir_path = './outdir'
test_case = [
5*3*2, # 1 block
5*3*2*4, # 4 blocks
5*3*2*5, # 1 row
5*3*2*5*2, # 2 rows
5*3*2*5*3, # 1 slice
5*3*2*5*3*3, # 3 slices
5*3*2*5*3*4, # whole img
5*3*2*5*3*7, # whole img (more mem than necessary)
]
nb_chunks = 4*3*5
# create input array
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob("*.hdf5"):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
|
flexible
|
{
"blob_id": "6dd11f71e514a46462bf0b97ddac9ea474e86ad0",
"index": 366,
"step-1": "<mask token>\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n",
"step-4": "import os, glob\nimport numpy as np\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n",
"step-5": "import os, glob\nimport numpy as np\n\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n # in C order\n bytes_per_voxel = 1\n R = (10,9,10)\n cs = (5,3,2)\n partition = (2,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n\n assert bs == 5*3*2\n assert brs == 5*3*2*5\n assert bss == 5*3*2*5*3\n\n\ndef test_get_strategy():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n \n test_case = {\n 5*2*3: 0, # 1 block \n 5*2*3*4: 0, # 4 blocks \n 5*2*3*5: 1, # 1 row \n 5*2*3*5*2: 1, # 2 rows\n 5*2*3*5*3: 2, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 2, # whole img\n 5*2*3*5*3*7: 2, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel\n \n test_case = {\n 5*2*3: 4*3*5, # 1 block \n 5*2*3*4: 4*3*2, # 4 blocks \n 5*2*3*5: 4*3, # 1 row \n 5*2*3*5*2: 4*2, # 2 rows\n 5*2*3*5*3: 4, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 1, # whole img\n 5*2*3*5*3*7: 1, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)\n\n # test number of buffers\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = (20,9,10)\n cs = (5,3,2)\n ff = 'HDF5'\n outdir_path = './outdir'\n\n test_case = [\n 5*3*2, # 1 block \n 5*3*2*4, # 4 blocks \n 5*3*2*5, # 1 row \n 5*3*2*5*2, # 2 rows\n 5*3*2*5*3, # 1 slice \n 5*3*2*5*3*3, # 3 slices \n 5*3*2*5*3*4, # whole img\n 5*3*2*5*3*7, # whole img (more mem than necessary)\n ]\n\n nb_chunks = 4*3*5\n\n # create input array\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n \n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob(\"*.hdf5\"):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import pandas as pd

# 1. Load the data
# Read "wheat.csv" from disk, passing index_col=0 so the first column becomes
# the row index; head() shows the first rows.
data = pd.read_csv("wheat.csv",index_col=0)
print(data.head(6))

# 2. Handle missing values
# The dataset contains some missing values, which would break training on empty
# features, so fill them with DataFrame.fillna, using each column's mean() as the fill value.
data = data.fillna(data.mean())
print(data)

# 3. Split the dataset: import train_test_split from sklearn.model_selection,
# store its return values in X_train, X_test, y_train and y_test, and set test_size=0.3,
# i.e. 70% of the samples form the training set and 30% the test set. Print the set sizes.
from sklearn.model_selection import train_test_split
X = data.iloc[:,:7]
y = data.iloc[:,7]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=300)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
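
# Editor's sketch (not from the original script): if the wheat classes were
# imbalanced, stratify=y would preserve class ratios across the split:
# X_train, X_test, y_train, y_test = train_test_split(
#     X, y, test_size=0.3, stratify=y, random_state=300)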
# 4. Build and train a random forest model
# Import RandomForestClassifier from sklearn.ensemble and build the classifier
# with n_estimators=10, i.e. a forest of 10 decision trees. Fit it on the training set.
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=10)
model.fit(X_train, y_train)

# 5. Predict classes with the random forest
# predict returns an array of predicted labels for the test samples, which we print.
y_pred = model.predict(X_test)
print("Predictions of test set:\n%s"%y_pred)

# 6. Inspect feature importances
# feature_importances_ gives each feature's importance; features 1, 2, 5 and 7
# are relatively more important in this random forest.
print(model.feature_importances_)

# 7. Evaluate model accuracy
# score computes the model's prediction accuracy on the test set.
print(model.score(X_test,y_test))

# 8. Tune the number of trees in the forest
# The tree count, set via n_estimators, is one of the model's more important parameters.
# With 30 trees the model improves over 10; at 100 the gain is no longer noticeable
# and accuracy may even drop, suggesting overfitting.
model = RandomForestClassifier(n_estimators=30)
model.fit(X_train, y_train)
print(model.score(X_test,y_test))
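
# Editor's sketch (assumes the split above; exact scores will vary with the data):
# scan several forest sizes to see where test accuracy levels off.
for n in (1, 10, 30, 100):
    m = RandomForestClassifier(n_estimators=n, random_state=300)
    m.fit(X_train, y_train)
    print(n, m.score(X_test, y_test))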
# 9. Compare with a single decision tree
# A decision tree scores about the same as a random forest containing a single tree,
# but overall the forest is more accurate, at the cost of interpretability:
# the forest's structure cannot easily be read off as one simple tree.
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
print(clf.score(X_test,y_test))

model = RandomForestClassifier(n_estimators=1)
model.fit(X_train, y_train)
print(model.score(X_test,y_test))
|
normal
|
{
"blob_id": "7da2be1b530faa8ce9a8570247887e8e0d74c310",
"index": 711,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(data.head(6))\n<mask token>\nprint(data)\n<mask token>\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n<mask token>\nmodel.fit(X_train, y_train)\n<mask token>\nprint(\"\"\"Predictions of test set:\n%s\"\"\" % y_pred)\nprint(model.feature_importances_)\nprint(model.score(X_test, y_test))\n<mask token>\nmodel.fit(X_train, y_train)\nprint(model.score(X_test, y_test))\n<mask token>\nprint(clf.score(X_test, y_test))\n<mask token>\nmodel.fit(X_train, y_train)\nprint(model.score(X_test, y_test))\n",
"step-3": "<mask token>\ndata = pd.read_csv('wheat.csv', index_col=0)\nprint(data.head(6))\ndata = data.fillna(data.mean())\nprint(data)\n<mask token>\nX = data.iloc[:, :7]\ny = data.iloc[:, 7]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=300)\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n<mask token>\nmodel = RandomForestClassifier(n_estimators=10)\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint(\"\"\"Predictions of test set:\n%s\"\"\" % y_pred)\nprint(model.feature_importances_)\nprint(model.score(X_test, y_test))\nmodel = RandomForestClassifier(n_estimators=30)\nmodel.fit(X_train, y_train)\nprint(model.score(X_test, y_test))\n<mask token>\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X_train, y_train)\nprint(clf.score(X_test, y_test))\nmodel = RandomForestClassifier(n_estimators=1)\nmodel.fit(X_train, y_train)\nprint(model.score(X_test, y_test))\n",
"step-4": "import pandas as pd\ndata = pd.read_csv('wheat.csv', index_col=0)\nprint(data.head(6))\ndata = data.fillna(data.mean())\nprint(data)\nfrom sklearn.model_selection import train_test_split\nX = data.iloc[:, :7]\ny = data.iloc[:, 7]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=300)\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\nfrom sklearn.ensemble import RandomForestClassifier\nmodel = RandomForestClassifier(n_estimators=10)\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nprint(\"\"\"Predictions of test set:\n%s\"\"\" % y_pred)\nprint(model.feature_importances_)\nprint(model.score(X_test, y_test))\nmodel = RandomForestClassifier(n_estimators=30)\nmodel.fit(X_train, y_train)\nprint(model.score(X_test, y_test))\nfrom sklearn import tree\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X_train, y_train)\nprint(clf.score(X_test, y_test))\nmodel = RandomForestClassifier(n_estimators=1)\nmodel.fit(X_train, y_train)\nprint(model.score(X_test, y_test))\n",
"step-5": "import pandas as pd\n#1. 读入数据\n#从本地读入“wheat.csv”文件,指定index_col参数为00,即将第一列作为每行的索引。用head()函数查看前几行数据。\ndata = pd.read_csv(\"wheat.csv\",index_col=0)\nprint(data.head(6))\n\n#2. 缺失值处理\n#该数据集中包含部分缺失值,在模型训练时会遇到特征值为空的问题,故对缺失值进行处理,\n## 用DataFrame的fillna方法进行缺失值填充,填充值为用mean方法得到的该列平均值。\ndata = data.fillna(data.mean())\nprint(data)\n\n\n#3. 划分数据集从sklearn.model_selection模块导入train_test_split函数,\n# 并将返回值放入变量X_train、X_test、y_train和y_test之中,指定参数test_size=0.3,\n# 即将70%的数据样本作为训练集,将30%的数据样本作为测试集。输出训练集和测试集大小。\nfrom sklearn.model_selection import train_test_split\nX = data.iloc[:,:7]\ny = data.iloc[:,7]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=300)\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\n#4.构建随机森林模型并训练\n#从sklearn.ensemble模块中导入RandomForestClassifier函数,\n## 并用其构建随机森林分类模型,指定n_estimators参数为1010,\n# 即使用1010棵决策树构建模型。将训练集传入模型进行模型训练。\nfrom sklearn.ensemble import RandomForestClassifier\nmodel= RandomForestClassifier(n_estimators=10)\nmodel.fit(X_train, y_train)\n\n#5.利用随机森林模型预测分类\n#运用predict方法预测测试集中样本的分类,该方法返回一个预测结果数组,输出预测的分类结果。\ny_pred = model.predict(X_test)\nprint(\"Predictions of test set:\\n%s\"%y_pred)\n\n#6. 查看各特征重要性\n#用feature_importances_属性查看每个特征的重要性,相对而言第11、22、55、77个特征在随机森林分类中的重要性强一些。\nprint(model.feature_importances_)\n\n#7. 评估模型准确率\n#利用score方法计算模型在测试集上的预测准确率。\nprint(model.score(X_test,y_test))\n\n#8. 调整随机森林中的树木数量\n#随机森林中的数目数量是模型中较为重要的参数,\n#通过指定n_estimators参数进行设置,设置为30时模型的性能较10时有所提升,\n#但设置为100时,其准确度不但没有提升已不明显,甚至可能下降,可能已经过拟合。\nmodel= RandomForestClassifier(n_estimators=30)\nmodel.fit(X_train, y_train)\nprint(model.score(X_test,y_test))\n\n#9. 与决策树分类进行比较\n#决策树与随机森林在分类效果上进行比较,\n# 决策树模型的分类准确率与仅含单棵决策树的随机森林类似,\n# 但是总体上随机森林的准确度要高于决策树,但其模型的解释性较差,无法轻易得到树的基本结构。\nfrom sklearn import tree\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X_train, y_train)\nprint(clf.score(X_test,y_test))\n\nmodel= RandomForestClassifier(n_estimators=1)\nmodel.fit(X_train, y_train)\nprint(model.score(X_test,y_test))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.2.3 on 2021-08-26 09:18
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Restaurant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('restaurant_name', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('district', models.CharField(max_length=200)),
('rating', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('photo', models.ImageField(upload_to='uploads')),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('adult', models.IntegerField()),
('entry_date', models.DateTimeField()),
('restaurant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurant.restaurant')),
],
),
]
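
# Note (editor's addition): on_delete=django.db.models.deletion.CASCADE on
# Reservation.restaurant means deleting a Restaurant row also deletes all of
# its Reservation rows.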
|
normal
|
{
"blob_id": "1ea61ab4003de80ffe9fb3e284b6686d4bf20b15",
"index": 787,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Restaurant', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('restaurant_name', models.CharField(\n max_length=200)), ('city', models.CharField(max_length=200)), (\n 'district', models.CharField(max_length=200)), ('rating', models.\n FloatField(validators=[django.core.validators.MinValueValidator(0),\n django.core.validators.MaxValueValidator(5)])), ('photo', models.\n ImageField(upload_to='uploads'))]), migrations.CreateModel(name=\n 'Reservation', fields=[('id', models.BigAutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('adult',\n models.IntegerField()), ('entry_date', models.DateTimeField()), (\n 'restaurant', models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='restaurant.restaurant'))])]\n",
"step-4": "import django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Restaurant', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('restaurant_name', models.CharField(\n max_length=200)), ('city', models.CharField(max_length=200)), (\n 'district', models.CharField(max_length=200)), ('rating', models.\n FloatField(validators=[django.core.validators.MinValueValidator(0),\n django.core.validators.MaxValueValidator(5)])), ('photo', models.\n ImageField(upload_to='uploads'))]), migrations.CreateModel(name=\n 'Reservation', fields=[('id', models.BigAutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('adult',\n models.IntegerField()), ('entry_date', models.DateTimeField()), (\n 'restaurant', models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='restaurant.restaurant'))])]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-08-26 09:18\n\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Restaurant',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('restaurant_name', models.CharField(max_length=200)),\n ('city', models.CharField(max_length=200)),\n ('district', models.CharField(max_length=200)),\n ('rating', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),\n ('photo', models.ImageField(upload_to='uploads')),\n ],\n ),\n migrations.CreateModel(\n name='Reservation',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('adult', models.IntegerField()),\n ('entry_date', models.DateTimeField()),\n ('restaurant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurant.restaurant')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestRedshiftCreateClusterTrigger:
<|reserved_special_token_0|>
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
<|reserved_special_token_0|>
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
class_path, args = redshift_create_cluster_trigger.serialize()
assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'
assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER
assert args['poll_interval'] == str(TEST_POLL_INTERVAL)
assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)
assert args['aws_conn_id'] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
TEST_CLUSTER_IDENTIFIER = 'test-cluster'
TEST_POLL_INTERVAL = 10
TEST_MAX_ATTEMPT = 10
TEST_AWS_CONN_ID = 'test-aws-id'
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
class_path, args = redshift_create_cluster_trigger.serialize()
assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'
assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER
assert args['poll_interval'] == str(TEST_POLL_INTERVAL)
assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)
assert args['aws_conn_id'] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
<|reserved_special_token_1|>
from __future__ import annotations
import sys
import pytest
from airflow.providers.amazon.aws.triggers.redshift_cluster import RedshiftCreateClusterTrigger
from airflow.triggers.base import TriggerEvent
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
TEST_CLUSTER_IDENTIFIER = 'test-cluster'
TEST_POLL_INTERVAL = 10
TEST_MAX_ATTEMPT = 10
TEST_AWS_CONN_ID = 'test-aws-id'
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
class_path, args = redshift_create_cluster_trigger.serialize()
assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'
assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER
assert args['poll_interval'] == str(TEST_POLL_INTERVAL)
assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)
assert args['aws_conn_id'] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
<|reserved_special_token_1|>
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import sys
import pytest
from airflow.providers.amazon.aws.triggers.redshift_cluster import RedshiftCreateClusterTrigger
from airflow.triggers.base import TriggerEvent
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
TEST_CLUSTER_IDENTIFIER = "test-cluster"
TEST_POLL_INTERVAL = 10
TEST_MAX_ATTEMPT = 10
TEST_AWS_CONN_ID = "test-aws-id"
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
poll_interval=TEST_POLL_INTERVAL,
max_attempt=TEST_MAX_ATTEMPT,
aws_conn_id=TEST_AWS_CONN_ID,
)
class_path, args = redshift_create_cluster_trigger.serialize()
assert (
class_path
== "airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger"
)
assert args["cluster_identifier"] == TEST_CLUSTER_IDENTIFIER
assert args["poll_interval"] == str(TEST_POLL_INTERVAL)
assert args["max_attempt"] == str(TEST_MAX_ATTEMPT)
assert args["aws_conn_id"] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn")
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
poll_interval=TEST_POLL_INTERVAL,
max_attempt=TEST_MAX_ATTEMPT,
aws_conn_id=TEST_AWS_CONN_ID,
)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({"status": "success", "message": "Cluster Created"})
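
# Editor's note: awaiting generator.asend(None) advances the trigger's async
# generator to its first yield, so the asserted TriggerEvent is the first event
# the trigger emits once the mocked waiter completes.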
|
flexible
|
{
"blob_id": "5c291dbc241a80e7f2625ba338a4b9b3a3f3b2d0",
"index": 1119,
"step-1": "<mask token>\n\n\nclass TestRedshiftCreateClusterTrigger:\n <mask token>\n\n @pytest.mark.asyncio\n @async_mock.patch(\n 'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'\n )\n async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):\n mock = async_mock.MagicMock()\n mock_async_conn.__aenter__.return_value = mock\n mock.get_waiter().wait = AsyncMock()\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n generator = redshift_create_cluster_trigger.run()\n response = await generator.asend(None)\n assert response == TriggerEvent({'status': 'success', 'message':\n 'Cluster Created'})\n",
"step-2": "<mask token>\nif sys.version_info < (3, 8):\n from asynctest import CoroutineMock as AsyncMock, mock as async_mock\nelse:\n from unittest import mock as async_mock\n from unittest.mock import AsyncMock\n<mask token>\n\n\nclass TestRedshiftCreateClusterTrigger:\n\n def test_redshift_create_cluster_trigger_serialize(self):\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n class_path, args = redshift_create_cluster_trigger.serialize()\n assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'\n assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER\n assert args['poll_interval'] == str(TEST_POLL_INTERVAL)\n assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)\n assert args['aws_conn_id'] == TEST_AWS_CONN_ID\n\n @pytest.mark.asyncio\n @async_mock.patch(\n 'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'\n )\n async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):\n mock = async_mock.MagicMock()\n mock_async_conn.__aenter__.return_value = mock\n mock.get_waiter().wait = AsyncMock()\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n generator = redshift_create_cluster_trigger.run()\n response = await generator.asend(None)\n assert response == TriggerEvent({'status': 'success', 'message':\n 'Cluster Created'})\n",
"step-3": "<mask token>\nif sys.version_info < (3, 8):\n from asynctest import CoroutineMock as AsyncMock, mock as async_mock\nelse:\n from unittest import mock as async_mock\n from unittest.mock import AsyncMock\nTEST_CLUSTER_IDENTIFIER = 'test-cluster'\nTEST_POLL_INTERVAL = 10\nTEST_MAX_ATTEMPT = 10\nTEST_AWS_CONN_ID = 'test-aws-id'\n\n\nclass TestRedshiftCreateClusterTrigger:\n\n def test_redshift_create_cluster_trigger_serialize(self):\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n class_path, args = redshift_create_cluster_trigger.serialize()\n assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'\n assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER\n assert args['poll_interval'] == str(TEST_POLL_INTERVAL)\n assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)\n assert args['aws_conn_id'] == TEST_AWS_CONN_ID\n\n @pytest.mark.asyncio\n @async_mock.patch(\n 'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'\n )\n async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):\n mock = async_mock.MagicMock()\n mock_async_conn.__aenter__.return_value = mock\n mock.get_waiter().wait = AsyncMock()\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n generator = redshift_create_cluster_trigger.run()\n response = await generator.asend(None)\n assert response == TriggerEvent({'status': 'success', 'message':\n 'Cluster Created'})\n",
"step-4": "from __future__ import annotations\nimport sys\nimport pytest\nfrom airflow.providers.amazon.aws.triggers.redshift_cluster import RedshiftCreateClusterTrigger\nfrom airflow.triggers.base import TriggerEvent\nif sys.version_info < (3, 8):\n from asynctest import CoroutineMock as AsyncMock, mock as async_mock\nelse:\n from unittest import mock as async_mock\n from unittest.mock import AsyncMock\nTEST_CLUSTER_IDENTIFIER = 'test-cluster'\nTEST_POLL_INTERVAL = 10\nTEST_MAX_ATTEMPT = 10\nTEST_AWS_CONN_ID = 'test-aws-id'\n\n\nclass TestRedshiftCreateClusterTrigger:\n\n def test_redshift_create_cluster_trigger_serialize(self):\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n class_path, args = redshift_create_cluster_trigger.serialize()\n assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'\n assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER\n assert args['poll_interval'] == str(TEST_POLL_INTERVAL)\n assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)\n assert args['aws_conn_id'] == TEST_AWS_CONN_ID\n\n @pytest.mark.asyncio\n @async_mock.patch(\n 'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'\n )\n async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):\n mock = async_mock.MagicMock()\n mock_async_conn.__aenter__.return_value = mock\n mock.get_waiter().wait = AsyncMock()\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=\n TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=\n TEST_AWS_CONN_ID)\n generator = redshift_create_cluster_trigger.run()\n response = await generator.asend(None)\n assert response == TriggerEvent({'status': 'success', 'message':\n 'Cluster Created'})\n",
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport sys\n\nimport pytest\n\nfrom airflow.providers.amazon.aws.triggers.redshift_cluster import RedshiftCreateClusterTrigger\nfrom airflow.triggers.base import TriggerEvent\n\nif sys.version_info < (3, 8):\n from asynctest import CoroutineMock as AsyncMock, mock as async_mock\nelse:\n from unittest import mock as async_mock\n from unittest.mock import AsyncMock\n\n\nTEST_CLUSTER_IDENTIFIER = \"test-cluster\"\nTEST_POLL_INTERVAL = 10\nTEST_MAX_ATTEMPT = 10\nTEST_AWS_CONN_ID = \"test-aws-id\"\n\n\nclass TestRedshiftCreateClusterTrigger:\n def test_redshift_create_cluster_trigger_serialize(self):\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER,\n poll_interval=TEST_POLL_INTERVAL,\n max_attempt=TEST_MAX_ATTEMPT,\n aws_conn_id=TEST_AWS_CONN_ID,\n )\n class_path, args = redshift_create_cluster_trigger.serialize()\n assert (\n class_path\n == \"airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger\"\n )\n assert args[\"cluster_identifier\"] == TEST_CLUSTER_IDENTIFIER\n assert args[\"poll_interval\"] == str(TEST_POLL_INTERVAL)\n assert args[\"max_attempt\"] == str(TEST_MAX_ATTEMPT)\n assert args[\"aws_conn_id\"] == TEST_AWS_CONN_ID\n\n @pytest.mark.asyncio\n @async_mock.patch(\"airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn\")\n async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):\n mock = async_mock.MagicMock()\n mock_async_conn.__aenter__.return_value = mock\n mock.get_waiter().wait = AsyncMock()\n\n redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(\n cluster_identifier=TEST_CLUSTER_IDENTIFIER,\n poll_interval=TEST_POLL_INTERVAL,\n max_attempt=TEST_MAX_ATTEMPT,\n aws_conn_id=TEST_AWS_CONN_ID,\n )\n\n generator = redshift_create_cluster_trigger.run()\n response = await generator.asend(None)\n\n assert response == TriggerEvent({\"status\": \"success\", \"message\": \"Cluster Created\"})\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Line:
def __init__(self, point1, point2):
if isinstance(point1, Point):
self.p1 = point1
elif isinstance(point1, (Tuple, List)):
self.p1 = Point(*point1)
else:
raise TypeError('Incorrect types')
if isinstance(point2, Point):
self.p2 = point2
        elif isinstance(point2, (Tuple, List)):
self.p2 = Point(*point2)
else:
raise TypeError('Incorrect types')
self.constraints = []
def middle(self):
x = (self.p1.x + self.p2.x) / 2
y = (self.p1.y + self.p2.y) / 2
return Point(x, y)
def length(self):
return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.
p1.y) ** 2)
def tang(self):
return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
def __str__(self):
return f'p1={self.p1} p2={self.p2}'
class Constraints:
def __init__(self):
pass
class Parallelism(Constraints):
def __init__(self, line1, line2):
super().__init__()
self.line1 = line1
self.line2 = line2
def get_const(self):
dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)
dy = self.line1.tang() * dx
        self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p1.y + dy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Point:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Line:
def __init__(self, point1, point2):
if isinstance(point1, Point):
self.p1 = point1
elif isinstance(point1, (Tuple, List)):
self.p1 = Point(*point1)
else:
raise TypeError('Incorrect types')
if isinstance(point2, Point):
self.p2 = point2
        elif isinstance(point2, (Tuple, List)):
self.p2 = Point(*point2)
else:
raise TypeError('Incorrect types')
self.constraints = []
def middle(self):
x = (self.p1.x + self.p2.x) / 2
y = (self.p1.y + self.p2.y) / 2
return Point(x, y)
def length(self):
return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.
p1.y) ** 2)
def tang(self):
return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
def __str__(self):
return f'p1={self.p1} p2={self.p2}'
class Constraints:
def __init__(self):
pass
class Parallelism(Constraints):
def __init__(self, line1, line2):
super().__init__()
self.line1 = line1
self.line2 = line2
def get_const(self):
dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)
dy = self.line1.tang() * dx
        self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p1.y + dy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.constraints = []
def __str__(self):
return f'({self.x}, {self.y})'
class Line:
def __init__(self, point1, point2):
if isinstance(point1, Point):
self.p1 = point1
elif isinstance(point1, (Tuple, List)):
self.p1 = Point(*point1)
else:
raise TypeError('Incorrect types')
if isinstance(point2, Point):
self.p2 = point2
        elif isinstance(point2, (Tuple, List)):
self.p2 = Point(*point2)
else:
raise TypeError('Incorrect types')
self.constraints = []
def middle(self):
x = (self.p1.x + self.p2.x) / 2
y = (self.p1.y + self.p2.y) / 2
return Point(x, y)
def length(self):
return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.
p1.y) ** 2)
def tang(self):
return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
def __str__(self):
return f'p1={self.p1} p2={self.p2}'
class Constraints:
def __init__(self):
pass
class Parallelism(Constraints):
def __init__(self, line1, line2):
super().__init__()
self.line1 = line1
self.line2 = line2
def get_const(self):
dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)
dy = self.line1.tang() * dx
        self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p1.y + dy
<|reserved_special_token_0|>
parall.get_const()
print(line22)
<|reserved_special_token_1|>
from typing import Tuple, List
import math
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.constraints = []
def __str__(self):
return f'({self.x}, {self.y})'
class Line:
def __init__(self, point1, point2):
if isinstance(point1, Point):
self.p1 = point1
elif isinstance(point1, (Tuple, List)):
self.p1 = Point(*point1)
else:
raise TypeError('Incorrect types')
if isinstance(point2, Point):
self.p2 = point2
        elif isinstance(point2, (Tuple, List)):
self.p2 = Point(*point2)
else:
raise TypeError('Incorrect types')
self.constraints = []
def middle(self):
x = (self.p1.x + self.p2.x) / 2
y = (self.p1.y + self.p2.y) / 2
return Point(x, y)
def length(self):
return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.
p1.y) ** 2)
def tang(self):
return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)
def __str__(self):
return f'p1={self.p1} p2={self.p2}'
class Constraints:
def __init__(self):
pass
class Parallelism(Constraints):
def __init__(self, line1, line2):
super().__init__()
self.line1 = line1
self.line2 = line2
def get_const(self):
dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)
dy = self.line1.tang() * dx
        self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p1.y + dy
p1 = Point(0, 0)
p2 = Point(2, 6)
p3 = Point(7, 0)
p4 = Point(10, 6)
line11 = Line(p1, p2)
line22 = Line(p3, p4)
parall = Parallelism(line11, line22)
parall.get_const()
print(line22)
<|reserved_special_token_1|>
from typing import Tuple, List
import math
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
self.constraints = []
def __str__(self):
return f"({self.x}, {self.y})"
class Line:
def __init__(self, point1, point2):
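        # Either endpoint may be given as a Point or as an (x, y) tuple/list.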
if isinstance(point1, Point):
self.p1 = point1
elif isinstance(point1, (Tuple, List)):
self.p1 = Point(*point1)
else:
raise TypeError("Incorrect types")
if isinstance(point2, Point):
self.p2 = point2
        elif isinstance(point2, (Tuple, List)):
self.p2 = Point(*point2)
else:
raise TypeError("Incorrect types")
self.constraints = []
def middle(self):
x = (self.p1.x + self.p2.x)/2
y = (self.p1.y + self.p2.y)/2
return Point(x, y)
def length(self):
return math.sqrt((self.p2.x - self.p1.x)**2 + (self.p2.y - self.p1.y)**2)
def tang(self):
return (self.p2.y - self.p1.y)/(self.p2.x - self.p1.x)
def __str__(self):
return f"p1={self.p1} p2={self.p2}"
class Constraints:
def __init__(self):
pass
class Parallelism(Constraints):
def __init__(self, line1, line2):
super().__init__()
self.line1 = line1
self.line2 = line2
def get_const(self):
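        # For a segment of length L that must be parallel to a line of slope k,
        # the axis offsets are dx = L / sqrt(1 + k**2) and dy = k * dx; this
        # keeps line2's length while matching line1's slope.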
dx = self.line2.length() / math.sqrt(1 + self.line1.tang()**2)
dy = self.line1.tang() * dx
        self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p1.y + dy
p1 = Point(0, 0)
p2 = Point(2, 6)
p3 = Point(7, 0)
p4 = Point(10, 6)
line11 = Line(p1, p2)
line22 = Line(p3, p4)
parall = Parallelism(line11, line22)
parall.get_const()
print(line22)
|
flexible
|
{
"blob_id": "e59a51641dc2966b0170678de064e2845e170cf5",
"index": 4943,
"step-1": "<mask token>\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Point:\n <mask token>\n <mask token>\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.constraints = []\n\n def __str__(self):\n return f'({self.x}, {self.y})'\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\n<mask token>\nparall.get_const()\nprint(line22)\n",
"step-4": "from typing import Tuple, List\nimport math\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.constraints = []\n\n def __str__(self):\n return f'({self.x}, {self.y})'\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\np1 = Point(0, 0)\np2 = Point(2, 6)\np3 = Point(7, 0)\np4 = Point(10, 6)\nline11 = Line(p1, p2)\nline22 = Line(p3, p4)\nparall = Parallelism(line11, line22)\nparall.get_const()\nprint(line22)\n",
"step-5": "from typing import Tuple, List\nimport math\n\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n self.constraints = []\n\n def __str__(self):\n return f\"({self.x}, {self.y})\"\n\n\nclass Line:\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError(\"Incorrect types\")\n\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError(\"Incorrect types\")\n\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x)/2\n y = (self.p1.y + self.p2.y)/2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x)**2 + (self.p2.y - self.p1.y)**2)\n\n def tang(self):\n return (self.p2.y - self.p1.y)/(self.p2.x - self.p1.x)\n\n def __str__(self):\n return f\"p1={self.p1} p2={self.p2}\"\n\n\nclass Constraints:\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang()**2)\n dy = self.line1.tang() * dx\n\n self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p2.y + dy\n\n\np1 = Point(0, 0)\np2 = Point(2, 6)\n\np3 = Point(7, 0)\np4 = Point(10, 6)\n\nline11 = Line(p1, p2)\nline22 = Line(p3, p4)\n\nparall = Parallelism(line11, line22)\nparall.get_const()\n\nprint(line22)\n",
"step-ids": [
11,
12,
15,
17,
18
]
}
|
[
11,
12,
15,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def jindutiao(jindu, zonge):
ret = jindu / zonge * 100
r = '\r%s%d%%' % ('=' * jindu, ret)
sys.stdout.write(r)
sys.stdout.flush()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def jindutiao(jindu, zonge):
ret = jindu / zonge * 100
r = '\r%s%d%%' % ('=' * jindu, ret)
sys.stdout.write(r)
sys.stdout.flush()
if __name__ == '__main__':
for i in range(101):
time.sleep(0.1)
jindutiao(i, 100)
<|reserved_special_token_1|>
import time
import sys
def jindutiao(jindu, zonge):
ret = jindu / zonge * 100
r = '\r%s%d%%' % ('=' * jindu, ret)
sys.stdout.write(r)
sys.stdout.flush()
if __name__ == '__main__':
for i in range(101):
time.sleep(0.1)
jindutiao(i, 100)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Yuan
import time
import sys
def jindutiao(jindu,zonge):
    # Percentage completed so far.
    ret = (jindu/zonge)*100
    # "\r" moves the cursor back to the line start so the bar redraws in place.
    r = "\r%s%d%%"%("="*jindu,ret)
    sys.stdout.write(r)
    sys.stdout.flush()
if __name__ =="__main__":
for i in range(101):
time.sleep(0.1)
jindutiao(i,100)
|
flexible
|
{
"blob_id": "f7afd08fb8316e44c314d17ef382b98dde7eef91",
"index": 1605,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef jindutiao(jindu, zonge):\n ret = jindu / zonge * 100\n r = '\\r%s%d%%' % ('=' * jindu, ret)\n sys.stdout.write(r)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef jindutiao(jindu, zonge):\n ret = jindu / zonge * 100\n r = '\\r%s%d%%' % ('=' * jindu, ret)\n sys.stdout.write(r)\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n for i in range(101):\n time.sleep(0.1)\n jindutiao(i, 100)\n",
"step-4": "import time\nimport sys\n\n\ndef jindutiao(jindu, zonge):\n ret = jindu / zonge * 100\n r = '\\r%s%d%%' % ('=' * jindu, ret)\n sys.stdout.write(r)\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n for i in range(101):\n time.sleep(0.1)\n jindutiao(i, 100)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Yuan\n\n\nimport time\n\nimport sys\n\ndef jindutiao(jindu,zonge):\n\n ret = (jindu/zonge)*100\n\n r = \"\\r%s%d%%\"%(\"=\"*jindu,ret)\n sys.stdout.write(r)\n sys.stdout.flush()\n\n\nif __name__ ==\"__main__\":\n for i in range(101):\n time.sleep(0.1)\n jindutiao(i,100)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, render_template, redirect, request, session, flash
from data import db_session
from data import users, products
import os
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, IntegerField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, NumberRange
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
import datetime
from flask_restful import Api
import product_resource
from random import shuffle
app = Flask(__name__)
api = Api(app)
app.debug = True
UPLOAD_FOLDER = f'{os.getcwd()}\\static\\img\\profile_img'
app.config['SECRET_KEY'] = '12345aA'
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=1)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
login_manager = LoginManager()
login_manager.init_app(app)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() == 'jpg'
def get_profile_img():
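    # Serve the user's uploaded photo (saved as "<user id>.jpg") when it
    # exists, otherwise fall back to a gender-specific default avatar.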
os.chdir('static\\img\\profile_img')
if os.access(f'{current_user.id}.jpg', os.F_OK):
filename = str(current_user.id)
else:
if current_user.gender[0] == 'М':
filename = 'profilem'
else:
filename = 'profilef'
os.chdir('..\\..\\..')
return filename
def find_products(tag):
    sessions = db_session.create_session()
    all_products = sessions.query(products.Products).all()
    # Keep the cached `existence` flag in sync with the remaining stock.
    for item in all_products:
        if item.existence and item.still_have == 0:
            item.existence = 0
        elif not item.existence and item.still_have:
            item.existence = 1
    sessions.commit()
    ans_products = list()
    for item in all_products:
        title = item.title.lower()
        if tag in title or title in tag:
            ans_products.append(item)
    return ans_products
@app.errorhandler(404)
def not_found(error):
return render_template('404.html', error=error)
@login_manager.user_loader
def load_user(user_id):
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
return session_in_db.query(users.User).get(user_id)
class LoginForm(FlaskForm):
email = EmailField('Почта', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
if request.method == 'POST':
session['tag'] = request.form['search']
return redirect('/')
all_product = find_products(session.get('tag', '').lower())
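    # The active sort column and direction live in the session; ▲/▼ marks
    # the header of the column currently used for ordering.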
if session.get('reverse', False):
sim = '▲'
else:
sim = '▼'
simp = simc = simn = simnal = ''
pos = session.get('sort', 'none')
if pos == 'price':
all_product.sort(key=lambda x: x.price, reverse=session.get('reverse', False))
simp = sim
elif pos == 'nal':
all_product.sort(key=lambda x: x.existence, reverse=session.get('reverse', False))
simnal = sim
elif pos == 'count':
all_product.sort(key=lambda x: x.still_have, reverse=session.get('reverse', False))
simc = sim
elif pos == 'name':
simn = sim
all_product.sort(key=lambda x: x.title, reverse=session.get('reverse', False))
else:
shuffle(all_product)
return render_template('index.html', basket_count=session.get('basket_count', 0),
title="CoolStore", tag=session.get('tag', ''), size=len(all_product),
filename=filename, product=all_product, simc=simc, simn=simn, simp=simp,
simnal=simnal)
@app.route('/login', methods=['GET', 'POST'])
def login():
session['tag'] = ''
form = LoginForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).filter(users.User.email == form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
session['basket_count'] = len(bask)
return redirect("/")
return render_template('login_form.html',
message="Неправильный логин или пароль",
form=form)
return render_template('login_form.html', basket_count=session.get('basket_count', 0),
title='Авторизация', form=form, filename="profilem")
@app.route('/logout')
@login_required
def logout():
session['tag'] = ''
logout_user()
return redirect("/")
class RegisterForm(FlaskForm):
email = EmailField('Email', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль', validators=[DataRequired()])
surname = StringField('Фамилия', validators=[DataRequired()])
name = StringField('Имя', validators=[DataRequired()])
mname = StringField('Отчество(при наличии)', validators=[DataRequired()])
gender = SelectField("Пол", validators=[DataRequired()], choices=[('1', 'М'), ('2', "Ж")])
age = StringField('Возраст', validators=[DataRequired()])
submit = SubmitField('Подтвердить')
class LengthError(Exception):
error = 'Пароль должен состоять не менее чем из 8 символов!'
class SymbolError(Exception):
error = 'В пароле должен быть хотя бы один символ!'
class LetterError(Exception):
error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'
class DigitError(Exception):
error = 'В пароле должна быть хотя бы одна цифра!'
def bool_ys(password):
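    # ys flags which character classes occur: [digit, uppercase, lowercase, other symbol].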
ys = [0, 0, 0, 0]
for i in password:
if i.isdigit():
ys[0] = 1
elif i.isalpha():
if i.isupper():
ys[1] = 1
else:
ys[2] = 1
else:
ys[3] = 1
if ys[2] * ys[1] == 0:
raise LetterError
if ys[0] == 0:
raise DigitError
if ys[3] == 0:
raise SymbolError
return 'ok'
def check_password(password):
try:
        if len(password) < 8:
raise LengthError
bool_ys(password)
return 'OK'
except (LengthError, SymbolError, LetterError, DigitError) as ex:
return ex.error
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
result = check_password(form.password.data)
if result != 'OK':
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация',
form=form, email_error="OK", again_password_error="OK",
password_error=result)
if form.password.data != form.password_again.data:
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация',
form=form, email_error="OK", password_error="OK",
again_password_error="Пароли не совпадают")
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
if session_in_db.query(users.User).filter(users.User.email == form.email.data).first():
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация',
form=form, password_error="OK", again_password_error="OK",
email_error="Такой пользователь уже есть")
if form.gender.data == '1':
gen = "Мужской"
else:
gen = "Женский"
user = users.User(
name=form.name.data,
midname=form.mname.data,
gender=gen,
email=form.email.data,
surname=form.surname.data,
age=form.age.data,
hashed_password=form.password.data
)
session_in_db.add(user)
session_in_db.commit()
return redirect('/login')
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация', form=form, filename="profilem",
email_error="OK", password_error="OK", again_password_error="OK")
@app.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
if request.method == 'GET':
filename = get_profile_img()
params = {
'title': 'Профиль',
'filename': filename,
'id': current_user.id,
'name': current_user.name,
'sname': current_user.surname,
'mname': current_user.midname,
'gender': current_user.gender,
'age': current_user.age,
'basket_count': session.get('basket_count', 0)
}
return render_template('profile.html', **params)
elif request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'], f'{current_user.id}.jpg'))
return redirect('/profile')
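# A user's basket is persisted as a single string of "product_id-count" pairs
# separated by spaces (e.g. "3-2 7-1 "); the routes below parse and rebuild it.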
@app.route('/basket', methods=['GET', 'POST'])
@login_required
def basket():
sessions = db_session.create_session()
filename = get_profile_img()
user = load_user(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.basket.strip().split()]
bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]), x[1]], bask))
session['basket_count'] = len(bask)
return render_template('basket.html', basket_count=session.get('basket_count', 0),
title='Корзина', filename=filename, bask=bask)
@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])
def delete(product_id, count):
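    # Return the reserved units to stock, then drop this product's entry
    # from the basket string.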
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
prod.still_have += count
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.basket.strip().split()]
bask = list(filter(lambda x: x[0] != product_id, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
sessions.commit()
return redirect('/basket')
@app.route('/redact_profile', methods=['GET', 'POST'])
@login_required
def redact_profile():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
form = RegisterForm()
if request.method == 'GET':
if user.gender == 'Мужской':
gen = '1'
else:
gen = '2'
form.gender.data = gen
form.name.data = user.name
form.mname.data = user.midname
form.age.data = user.age
form.surname.data = user.surname
elif request.method == 'POST':
if form.gender.data == '1':
gen = "Мужской"
else:
gen = "Женский"
user.gender = gen
user.name = form.name.data
user.midname = form.mname.data
user.age = form.age.data
user.surname = form.surname.data
session_in_db.commit()
return redirect('/profile')
filename = get_profile_img()
return render_template('redact_profile.html', form=form, filename=filename,
basket_count=session.get('basket_count', 0), title='Редактирование')
class Buy(FlaskForm):
    count = IntegerField('Количество:', validators=[DataRequired(), NumberRange(1)],
default=1)
submit = SubmitField('В корзину')
@app.route('/product/<int:product_id>', methods=['GET', 'POST'])
def product(product_id):
form = Buy()
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if form.validate_on_submit():
if current_user.is_authenticated:
if sessions.query(products.Products).get(product_id).existence and \
form.count.data <= prod.still_have:
prod.still_have -= form.count.data
if prod.still_have == 0:
prod.existence = 0
user = sessions.query(users.User).get(current_user.id)
if user.basket:
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
change_product = False
for item in bask:
if item[0] == product_id:
item[1] += form.count.data
change_product = True
if not change_product:
user.basket = user.basket + f'{product_id}-{form.count.data} '
else:
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
else:
user.basket = f'{product_id}-{form.count.data} '
sessions.commit()
else:
return render_template('product.html', prod=prod, filename=filename,
title=prod.title, form=form,
basket_count=session.get('basket_count', 0),
                                   message='Товара в таком количестве нет в наличии!')
else:
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title,
form=form, message='Вы не авторизованы')
return redirect('/basket')
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title,
form=form)
@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_plus(product_id):
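    # Move one more unit of this product from stock into the user's basket entry.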
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if prod.still_have:
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] += 1
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod.still_have -= 1
sessions.commit()
return redirect('/basket')
@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_minus(product_id):
sessions = db_session.create_session()
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] -= 1
bask = list(filter(lambda x: x[1] > 0, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod = sessions.query(products.Products).get(product_id)
prod.still_have += 1
sessions.commit()
return redirect('/basket')
@app.route('/change/<string:pos>')
def change(pos):
last_pos = session.get('sort', 'none')
if last_pos == pos:
session['reverse'] = not session.get('reverse', False)
else:
session['reverse'] = False
session['sort'] = pos
return redirect('/')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Старый пароль', validators=[DataRequired()])
new_password = PasswordField('Новый пароль', validators=[DataRequired()])
again_password = PasswordField('Повторите новый пароль', validators=[DataRequired()])
submit = SubmitField('Сменить пароль')
@app.route('/change_password', methods=['GET', "POST"])
@login_required
def change_password():
filename = get_profile_img()
form = ChangePasswordForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
if user.hashed_password != form.old_password.data:
return render_template('change_password.html',
basket_count=session.get('basket_count', 0), title='Регистрация',
form=form, old_password_error="Неверный пароль",
again_password_error="OK", new_password_error="OK",
filename=filename)
result = check_password(form.new_password.data)
if user.hashed_password == form.new_password.data:
return render_template('change_password.html',
basket_count=session.get('basket_count', 0), title='Регистрация',
form=form, old_password_error="OK", again_password_error="OK",
                                   new_password_error="Новый пароль не должен совпадать со старым!",
filename=filename)
if result != 'OK':
return render_template('change_password.html',
basket_count=session.get('basket_count', 0), title='Регистрация',
form=form, old_password_error="OK", again_password_error="OK",
new_password_error=result, filename=filename)
if form.new_password.data != form.again_password.data:
return render_template('change_password.html',
basket_count=session.get('basket_count', 0), title='Регистрация',
form=form, old_password_error="OK", new_password_error="OK",
again_password_error="Пароли не совпадают!", filename=filename)
user.hashed_password = form.new_password.data
session_in_db.commit()
return redirect('/profile')
return render_template('change_password.html', form=form,
basket_count=session.get('basket_count', 0), title="Сменить пароль",
filename=filename, old_password_error="OK", again_password_error="OK",
new_password_error="OK")
def main():
db_session.global_init("db/blogs.sqlite")
api.add_resource(product_resource.ProductListResource, '/api/v2/products')
api.add_resource(product_resource.ProductResource, '/api/v2/products/<int:product_id>')
app.run()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "d373d283a622262e2da974549907bdd8f61e89ec",
"index": 2114,
"step-1": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'\n\n\ndef get_profile_img():\n os.chdir('static\\\\img\\\\profile_img')\n if os.access(f'{current_user.id}.jpg', os.F_OK):\n filename = str(current_user.id)\n elif current_user.gender[0] == 'М':\n filename = 'profilem'\n else:\n filename = 'profilef'\n os.chdir('..\\\\..\\\\..')\n return filename\n\n\ndef find_products(tag):\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n sessions.commit()\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n ans_products = list()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n title = item.title.lower()\n if tag in title or title in tag:\n ans_products.append(item)\n return ans_products\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html', error=error)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n return session_in_db.query(users.User).get(user_id)\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Почта', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n remember_me = BooleanField('Запомнить меня')\n submit = SubmitField('Войти')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n if request.method == 'POST':\n session['tag'] = request.form['search']\n return redirect('/')\n all_product = find_products(session.get('tag', '').lower())\n if session.get('reverse', False):\n sim = '▲'\n else:\n sim = '▼'\n simp = simc = simn = simnal = ''\n pos = session.get('sort', 'none')\n if pos == 'price':\n all_product.sort(key=lambda x: x.price, reverse=session.get(\n 'reverse', False))\n simp = sim\n elif pos == 'nal':\n all_product.sort(key=lambda x: x.existence, reverse=session.get(\n 'reverse', False))\n simnal = sim\n elif pos == 'count':\n all_product.sort(key=lambda x: x.still_have, reverse=session.get(\n 'reverse', False))\n simc = sim\n elif pos == 'name':\n simn = sim\n all_product.sort(key=lambda x: x.title, reverse=session.get(\n 'reverse', False))\n else:\n shuffle(all_product)\n return render_template('index.html', basket_count=session.get(\n 'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),\n size=len(all_product), filename=filename, product=all_product, simc\n =simc, simn=simn, simp=simp, simnal=simnal)\n\n\n<mask token>\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n session['tag'] = ''\n logout_user()\n return redirect('/')\n\n\nclass RegisterForm(FlaskForm):\n email = EmailField('Email', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n password_again = PasswordField('Повторите пароль', validators=[\n DataRequired()])\n surname = StringField('Фамилия', validators=[DataRequired()])\n name = StringField('Имя', validators=[DataRequired()])\n mname = StringField('Отчество(при наличии)', validators=[DataRequired()])\n gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',\n 
'М'), ('2', 'Ж')])\n age = StringField('Возраст', validators=[DataRequired()])\n submit = SubmitField('Подтвердить')\n\n\nclass LengthError(Exception):\n error = 'Пароль должен состоять не менее чем из 8 символов!'\n\n\nclass SymbolError(Exception):\n error = 'В пароле должен быть хотя бы один символ!'\n\n\nclass LetterError(Exception):\n error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'\n\n\nclass DigitError(Exception):\n error = 'В пароле должна быть хотя бы одна цифра!'\n\n\ndef bool_ys(password):\n ys = [0, 0, 0, 0]\n for i in password:\n if i.isdigit():\n ys[0] = 1\n elif i.isalpha():\n if i.isupper():\n ys[1] = 1\n else:\n ys[2] = 1\n else:\n ys[3] = 1\n if ys[2] * ys[1] == 0:\n raise LetterError\n if ys[0] == 0:\n raise DigitError\n if ys[3] == 0:\n raise SymbolError\n return 'ok'\n\n\ndef check_password(password):\n try:\n if len(password) <= 8:\n raise LengthError\n bool_ys(password)\n return 'OK'\n except (LengthError, SymbolError, LetterError, DigitError) as ex:\n return ex.error\n\n\n<mask token>\n\n\n@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])\ndef delete(product_id, count):\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += count\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n bask = list(filter(lambda x: x[0] != product_id, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/redact_profile', methods=['GET', 'POST'])\n@login_required\ndef redact_profile():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n form = RegisterForm()\n if request.method == 'GET':\n if user.gender == 'Мужской':\n gen = '1'\n else:\n gen = '2'\n form.gender.data = gen\n form.name.data = user.name\n form.mname.data = user.midname\n form.age.data = user.age\n form.surname.data = user.surname\n elif request.method == 'POST':\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n user.gender = gen\n user.name = form.name.data\n user.midname = form.mname.data\n user.age = form.age.data\n user.surname = form.surname.data\n session_in_db.commit()\n return redirect('/profile')\n filename = get_profile_img()\n return render_template('redact_profile.html', form=form, filename=\n filename, basket_count=session.get('basket_count', 0), title=\n 'Редактирование')\n\n\nclass Buy(FlaskForm):\n count = IntegerField('Колличество:', validators=[DataRequired(),\n NumberRange(1)], default=1)\n submit = SubmitField('В корзину')\n\n\n<mask token>\n\n\n@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])\ndef redact_prod_minus(product_id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n for item in bask:\n if item[0] == product_id:\n item[1] -= 1\n bask = list(filter(lambda x: x[1] > 0, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += 1\n sessions.commit()\n return redirect('/basket')\n\n\n<mask token>\n\n\nclass ChangePasswordForm(FlaskForm):\n 
old_password = PasswordField('Старый пароль', validators=[DataRequired()])\n new_password = PasswordField('Новый пароль', validators=[DataRequired()])\n again_password = PasswordField('Повторите новый пароль', validators=[\n DataRequired()])\n submit = SubmitField('Сменить пароль')\n\n\n@app.route('/change_password', methods=['GET', 'POST'])\n@login_required\ndef change_password():\n filename = get_profile_img()\n form = ChangePasswordForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n if user.hashed_password != form.old_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='Неверный пароль',\n again_password_error='OK', new_password_error='OK',\n filename=filename)\n result = check_password(form.new_password.data)\n if user.hashed_password == form.new_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=\n 'Новый пароль не должен совпадть со старым!', filename=filename\n )\n if result != 'OK':\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=result, filename=filename)\n if form.new_password.data != form.again_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', new_password_error='OK',\n again_password_error='Пароли не совпадают!', filename=filename)\n user.hashed_password = form.new_password.data\n session_in_db.commit()\n return redirect('/profile')\n return render_template('change_password.html', form=form, basket_count=\n session.get('basket_count', 0), title='Сменить пароль', filename=\n filename, old_password_error='OK', again_password_error='OK',\n new_password_error='OK')\n\n\ndef main():\n db_session.global_init('db/blogs.sqlite')\n api.add_resource(product_resource.ProductListResource, '/api/v2/products')\n api.add_resource(product_resource.ProductResource,\n '/api/v2/products/<int:product_id>')\n app.run()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'\n\n\ndef get_profile_img():\n os.chdir('static\\\\img\\\\profile_img')\n if os.access(f'{current_user.id}.jpg', os.F_OK):\n filename = str(current_user.id)\n elif current_user.gender[0] == 'М':\n filename = 'profilem'\n else:\n filename = 'profilef'\n os.chdir('..\\\\..\\\\..')\n return filename\n\n\ndef find_products(tag):\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n sessions.commit()\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n ans_products = list()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n title = item.title.lower()\n if tag in title or title in tag:\n ans_products.append(item)\n return ans_products\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html', error=error)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n return session_in_db.query(users.User).get(user_id)\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Почта', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n remember_me = BooleanField('Запомнить меня')\n submit = SubmitField('Войти')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n if request.method == 'POST':\n session['tag'] = request.form['search']\n return redirect('/')\n all_product = find_products(session.get('tag', '').lower())\n if session.get('reverse', False):\n sim = '▲'\n else:\n sim = '▼'\n simp = simc = simn = simnal = ''\n pos = session.get('sort', 'none')\n if pos == 'price':\n all_product.sort(key=lambda x: x.price, reverse=session.get(\n 'reverse', False))\n simp = sim\n elif pos == 'nal':\n all_product.sort(key=lambda x: x.existence, reverse=session.get(\n 'reverse', False))\n simnal = sim\n elif pos == 'count':\n all_product.sort(key=lambda x: x.still_have, reverse=session.get(\n 'reverse', False))\n simc = sim\n elif pos == 'name':\n simn = sim\n all_product.sort(key=lambda x: x.title, reverse=session.get(\n 'reverse', False))\n else:\n shuffle(all_product)\n return render_template('index.html', basket_count=session.get(\n 'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),\n size=len(all_product), filename=filename, product=all_product, simc\n =simc, simn=simn, simp=simp, simnal=simnal)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n session['tag'] = ''\n form = LoginForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).filter(users.User.email ==\n form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\n user.basket.strip().split()]\n bask = list(map(lambda x: [session_in_db.query(products.\n Products).get(x[0]), x[1]], bask))\n session['basket_count'] = 
len(bask)\n return redirect('/')\n return render_template('login_form.html', message=\n 'Неправильный логин или пароль', form=form)\n return render_template('login_form.html', basket_count=session.get(\n 'basket_count', 0), title='Авторизация', form=form, filename='profilem'\n )\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n session['tag'] = ''\n logout_user()\n return redirect('/')\n\n\nclass RegisterForm(FlaskForm):\n email = EmailField('Email', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n password_again = PasswordField('Повторите пароль', validators=[\n DataRequired()])\n surname = StringField('Фамилия', validators=[DataRequired()])\n name = StringField('Имя', validators=[DataRequired()])\n mname = StringField('Отчество(при наличии)', validators=[DataRequired()])\n gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',\n 'М'), ('2', 'Ж')])\n age = StringField('Возраст', validators=[DataRequired()])\n submit = SubmitField('Подтвердить')\n\n\nclass LengthError(Exception):\n error = 'Пароль должен состоять не менее чем из 8 символов!'\n\n\nclass SymbolError(Exception):\n error = 'В пароле должен быть хотя бы один символ!'\n\n\nclass LetterError(Exception):\n error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'\n\n\nclass DigitError(Exception):\n error = 'В пароле должна быть хотя бы одна цифра!'\n\n\ndef bool_ys(password):\n ys = [0, 0, 0, 0]\n for i in password:\n if i.isdigit():\n ys[0] = 1\n elif i.isalpha():\n if i.isupper():\n ys[1] = 1\n else:\n ys[2] = 1\n else:\n ys[3] = 1\n if ys[2] * ys[1] == 0:\n raise LetterError\n if ys[0] == 0:\n raise DigitError\n if ys[3] == 0:\n raise SymbolError\n return 'ok'\n\n\ndef check_password(password):\n try:\n if len(password) <= 8:\n raise LengthError\n bool_ys(password)\n return 'OK'\n except (LengthError, SymbolError, LetterError, DigitError) as ex:\n return ex.error\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n result = check_password(form.password.data)\n if result != 'OK':\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n email_error='OK', again_password_error='OK', password_error\n =result)\n if form.password.data != form.password_again.data:\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n email_error='OK', password_error='OK', again_password_error\n ='Пароли не совпадают')\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n if session_in_db.query(users.User).filter(users.User.email == form.\n email.data).first():\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n password_error='OK', again_password_error='OK', email_error\n ='Такой пользователь уже есть')\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n user = users.User(name=form.name.data, midname=form.mname.data,\n gender=gen, email=form.email.data, surname=form.surname.data,\n age=form.age.data, hashed_password=form.password.data)\n session_in_db.add(user)\n session_in_db.commit()\n return redirect('/login')\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form, filename=\n 'profilem', email_error='OK', password_error='OK',\n 
again_password_error='OK')\n\n\n@app.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile():\n if request.method == 'GET':\n filename = get_profile_img()\n params = {'title': 'Профиль', 'filename': filename, 'id':\n current_user.id, 'name': current_user.name, 'sname':\n current_user.surname, 'mname': current_user.midname, 'gender':\n current_user.gender, 'age': current_user.age, 'basket_count':\n session.get('basket_count', 0)}\n return render_template('profile.html', **params)\n elif request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n file.save(os.path.join(app.config['UPLOAD_FOLDER'],\n f'{current_user.id}.jpg'))\n return redirect('/profile')\n\n\n<mask token>\n\n\n@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])\ndef delete(product_id, count):\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += count\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n bask = list(filter(lambda x: x[0] != product_id, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/redact_profile', methods=['GET', 'POST'])\n@login_required\ndef redact_profile():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n form = RegisterForm()\n if request.method == 'GET':\n if user.gender == 'Мужской':\n gen = '1'\n else:\n gen = '2'\n form.gender.data = gen\n form.name.data = user.name\n form.mname.data = user.midname\n form.age.data = user.age\n form.surname.data = user.surname\n elif request.method == 'POST':\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n user.gender = gen\n user.name = form.name.data\n user.midname = form.mname.data\n user.age = form.age.data\n user.surname = form.surname.data\n session_in_db.commit()\n return redirect('/profile')\n filename = get_profile_img()\n return render_template('redact_profile.html', form=form, filename=\n filename, basket_count=session.get('basket_count', 0), title=\n 'Редактирование')\n\n\nclass Buy(FlaskForm):\n count = IntegerField('Колличество:', validators=[DataRequired(),\n NumberRange(1)], default=1)\n submit = SubmitField('В корзину')\n\n\n@app.route('/product/<int:product_id>', methods=['GET', 'POST'])\ndef product(product_id):\n form = Buy()\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n if form.validate_on_submit():\n if current_user.is_authenticated:\n if sessions.query(products.Products).get(product_id\n ).existence and form.count.data <= prod.still_have:\n prod.still_have -= form.count.data\n if prod.still_have == 0:\n prod.existence = 0\n user = sessions.query(users.User).get(current_user.id)\n if user.basket:\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for\n x in user.basket.strip().split()]\n change_product = False\n for item in bask:\n if item[0] == product_id:\n item[1] += form.count.data\n change_product = True\n 
if not change_product:\n user.basket = (user.basket +\n f'{product_id}-{form.count.data} ')\n else:\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for\n x in bask])\n bask += ' '\n user.basket = bask\n else:\n user.basket = f'{product_id}-{form.count.data} '\n sessions.commit()\n else:\n return render_template('product.html', prod=prod, filename=\n filename, title=prod.title, form=form, basket_count=\n session.get('basket_count', 0), message=\n 'Товара в таком колличестве нет в наличии!')\n else:\n return render_template('product.html', prod=prod, filename=\n filename, basket_count=session.get('basket_count', 0),\n title=prod.title, form=form, message='Вы не авторизованы')\n return redirect('/basket')\n return render_template('product.html', prod=prod, filename=filename,\n basket_count=session.get('basket_count', 0), title=prod.title, form\n =form)\n\n\n<mask token>\n\n\n@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])\ndef redact_prod_minus(product_id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n for item in bask:\n if item[0] == product_id:\n item[1] -= 1\n bask = list(filter(lambda x: x[1] > 0, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += 1\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/change/<string:pos>')\ndef change(pos):\n last_pos = session.get('sort', 'none')\n if last_pos == pos:\n session['reverse'] = not session.get('reverse', False)\n else:\n session['reverse'] = False\n session['sort'] = pos\n return redirect('/')\n\n\nclass ChangePasswordForm(FlaskForm):\n old_password = PasswordField('Старый пароль', validators=[DataRequired()])\n new_password = PasswordField('Новый пароль', validators=[DataRequired()])\n again_password = PasswordField('Повторите новый пароль', validators=[\n DataRequired()])\n submit = SubmitField('Сменить пароль')\n\n\n@app.route('/change_password', methods=['GET', 'POST'])\n@login_required\ndef change_password():\n filename = get_profile_img()\n form = ChangePasswordForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n if user.hashed_password != form.old_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='Неверный пароль',\n again_password_error='OK', new_password_error='OK',\n filename=filename)\n result = check_password(form.new_password.data)\n if user.hashed_password == form.new_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=\n 'Новый пароль не должен совпадть со старым!', filename=filename\n )\n if result != 'OK':\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=result, filename=filename)\n if form.new_password.data != form.again_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 
0), title='Регистрация', form=\n form, old_password_error='OK', new_password_error='OK',\n again_password_error='Пароли не совпадают!', filename=filename)\n user.hashed_password = form.new_password.data\n session_in_db.commit()\n return redirect('/profile')\n return render_template('change_password.html', form=form, basket_count=\n session.get('basket_count', 0), title='Сменить пароль', filename=\n filename, old_password_error='OK', again_password_error='OK',\n new_password_error='OK')\n\n\ndef main():\n db_session.global_init('db/blogs.sqlite')\n api.add_resource(product_resource.ProductListResource, '/api/v2/products')\n api.add_resource(product_resource.ProductResource,\n '/api/v2/products/<int:product_id>')\n app.run()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'\n\n\ndef get_profile_img():\n os.chdir('static\\\\img\\\\profile_img')\n if os.access(f'{current_user.id}.jpg', os.F_OK):\n filename = str(current_user.id)\n elif current_user.gender[0] == 'М':\n filename = 'profilem'\n else:\n filename = 'profilef'\n os.chdir('..\\\\..\\\\..')\n return filename\n\n\ndef find_products(tag):\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n sessions.commit()\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n ans_products = list()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n title = item.title.lower()\n if tag in title or title in tag:\n ans_products.append(item)\n return ans_products\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html', error=error)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n return session_in_db.query(users.User).get(user_id)\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Почта', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n remember_me = BooleanField('Запомнить меня')\n submit = SubmitField('Войти')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n if request.method == 'POST':\n session['tag'] = request.form['search']\n return redirect('/')\n all_product = find_products(session.get('tag', '').lower())\n if session.get('reverse', False):\n sim = '▲'\n else:\n sim = '▼'\n simp = simc = simn = simnal = ''\n pos = session.get('sort', 'none')\n if pos == 'price':\n all_product.sort(key=lambda x: x.price, reverse=session.get(\n 'reverse', False))\n simp = sim\n elif pos == 'nal':\n all_product.sort(key=lambda x: x.existence, reverse=session.get(\n 'reverse', False))\n simnal = sim\n elif pos == 'count':\n all_product.sort(key=lambda x: x.still_have, reverse=session.get(\n 'reverse', False))\n simc = sim\n elif pos == 'name':\n simn = sim\n all_product.sort(key=lambda x: x.title, reverse=session.get(\n 'reverse', False))\n else:\n shuffle(all_product)\n return render_template('index.html', basket_count=session.get(\n 'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),\n size=len(all_product), filename=filename, product=all_product, simc\n =simc, simn=simn, simp=simp, simnal=simnal)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n session['tag'] = ''\n form = LoginForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).filter(users.User.email ==\n form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\n user.basket.strip().split()]\n bask = list(map(lambda x: [session_in_db.query(products.\n Products).get(x[0]), x[1]], bask))\n session['basket_count'] = 
len(bask)\n return redirect('/')\n return render_template('login_form.html', message=\n 'Неправильный логин или пароль', form=form)\n return render_template('login_form.html', basket_count=session.get(\n 'basket_count', 0), title='Авторизация', form=form, filename='profilem'\n )\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n session['tag'] = ''\n logout_user()\n return redirect('/')\n\n\nclass RegisterForm(FlaskForm):\n email = EmailField('Email', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n password_again = PasswordField('Повторите пароль', validators=[\n DataRequired()])\n surname = StringField('Фамилия', validators=[DataRequired()])\n name = StringField('Имя', validators=[DataRequired()])\n mname = StringField('Отчество(при наличии)', validators=[DataRequired()])\n gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',\n 'М'), ('2', 'Ж')])\n age = StringField('Возраст', validators=[DataRequired()])\n submit = SubmitField('Подтвердить')\n\n\nclass LengthError(Exception):\n error = 'Пароль должен состоять не менее чем из 8 символов!'\n\n\nclass SymbolError(Exception):\n error = 'В пароле должен быть хотя бы один символ!'\n\n\nclass LetterError(Exception):\n error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'\n\n\nclass DigitError(Exception):\n error = 'В пароле должна быть хотя бы одна цифра!'\n\n\ndef bool_ys(password):\n ys = [0, 0, 0, 0]\n for i in password:\n if i.isdigit():\n ys[0] = 1\n elif i.isalpha():\n if i.isupper():\n ys[1] = 1\n else:\n ys[2] = 1\n else:\n ys[3] = 1\n if ys[2] * ys[1] == 0:\n raise LetterError\n if ys[0] == 0:\n raise DigitError\n if ys[3] == 0:\n raise SymbolError\n return 'ok'\n\n\ndef check_password(password):\n try:\n if len(password) <= 8:\n raise LengthError\n bool_ys(password)\n return 'OK'\n except (LengthError, SymbolError, LetterError, DigitError) as ex:\n return ex.error\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n result = check_password(form.password.data)\n if result != 'OK':\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n email_error='OK', again_password_error='OK', password_error\n =result)\n if form.password.data != form.password_again.data:\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n email_error='OK', password_error='OK', again_password_error\n ='Пароли не совпадают')\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n if session_in_db.query(users.User).filter(users.User.email == form.\n email.data).first():\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n password_error='OK', again_password_error='OK', email_error\n ='Такой пользователь уже есть')\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n user = users.User(name=form.name.data, midname=form.mname.data,\n gender=gen, email=form.email.data, surname=form.surname.data,\n age=form.age.data, hashed_password=form.password.data)\n session_in_db.add(user)\n session_in_db.commit()\n return redirect('/login')\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form, filename=\n 'profilem', email_error='OK', password_error='OK',\n 
again_password_error='OK')\n\n\n@app.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile():\n if request.method == 'GET':\n filename = get_profile_img()\n params = {'title': 'Профиль', 'filename': filename, 'id':\n current_user.id, 'name': current_user.name, 'sname':\n current_user.surname, 'mname': current_user.midname, 'gender':\n current_user.gender, 'age': current_user.age, 'basket_count':\n session.get('basket_count', 0)}\n return render_template('profile.html', **params)\n elif request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n file.save(os.path.join(app.config['UPLOAD_FOLDER'],\n f'{current_user.id}.jpg'))\n return redirect('/profile')\n\n\n@app.route('/basket', methods=['GET', 'POST'])\n@login_required\ndef basket():\n sessions = db_session.create_session()\n filename = get_profile_img()\n user = load_user(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]),\n x[1]], bask))\n session['basket_count'] = len(bask)\n return render_template('basket.html', basket_count=session.get(\n 'basket_count', 0), title='Корзина', filename=filename, bask=bask)\n\n\n@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])\ndef delete(product_id, count):\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += count\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n bask = list(filter(lambda x: x[0] != product_id, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/redact_profile', methods=['GET', 'POST'])\n@login_required\ndef redact_profile():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n form = RegisterForm()\n if request.method == 'GET':\n if user.gender == 'Мужской':\n gen = '1'\n else:\n gen = '2'\n form.gender.data = gen\n form.name.data = user.name\n form.mname.data = user.midname\n form.age.data = user.age\n form.surname.data = user.surname\n elif request.method == 'POST':\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n user.gender = gen\n user.name = form.name.data\n user.midname = form.mname.data\n user.age = form.age.data\n user.surname = form.surname.data\n session_in_db.commit()\n return redirect('/profile')\n filename = get_profile_img()\n return render_template('redact_profile.html', form=form, filename=\n filename, basket_count=session.get('basket_count', 0), title=\n 'Редактирование')\n\n\nclass Buy(FlaskForm):\n count = IntegerField('Колличество:', validators=[DataRequired(),\n NumberRange(1)], default=1)\n submit = SubmitField('В корзину')\n\n\n@app.route('/product/<int:product_id>', methods=['GET', 'POST'])\ndef product(product_id):\n form = Buy()\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n if 
form.validate_on_submit():\n if current_user.is_authenticated:\n if sessions.query(products.Products).get(product_id\n ).existence and form.count.data <= prod.still_have:\n prod.still_have -= form.count.data\n if prod.still_have == 0:\n prod.existence = 0\n user = sessions.query(users.User).get(current_user.id)\n if user.basket:\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for\n x in user.basket.strip().split()]\n change_product = False\n for item in bask:\n if item[0] == product_id:\n item[1] += form.count.data\n change_product = True\n if not change_product:\n user.basket = (user.basket +\n f'{product_id}-{form.count.data} ')\n else:\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for\n x in bask])\n bask += ' '\n user.basket = bask\n else:\n user.basket = f'{product_id}-{form.count.data} '\n sessions.commit()\n else:\n return render_template('product.html', prod=prod, filename=\n filename, title=prod.title, form=form, basket_count=\n session.get('basket_count', 0), message=\n 'Товара в таком колличестве нет в наличии!')\n else:\n return render_template('product.html', prod=prod, filename=\n filename, basket_count=session.get('basket_count', 0),\n title=prod.title, form=form, message='Вы не авторизованы')\n return redirect('/basket')\n return render_template('product.html', prod=prod, filename=filename,\n basket_count=session.get('basket_count', 0), title=prod.title, form\n =form)\n\n\n@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])\ndef redact_prod_plus(product_id):\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n if prod.still_have:\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n for item in bask:\n if item[0] == product_id:\n item[1] += 1\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n prod.still_have -= 1\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])\ndef redact_prod_minus(product_id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n for item in bask:\n if item[0] == product_id:\n item[1] -= 1\n bask = list(filter(lambda x: x[1] > 0, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += 1\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/change/<string:pos>')\ndef change(pos):\n last_pos = session.get('sort', 'none')\n if last_pos == pos:\n session['reverse'] = not session.get('reverse', False)\n else:\n session['reverse'] = False\n session['sort'] = pos\n return redirect('/')\n\n\nclass ChangePasswordForm(FlaskForm):\n old_password = PasswordField('Старый пароль', validators=[DataRequired()])\n new_password = PasswordField('Новый пароль', validators=[DataRequired()])\n again_password = PasswordField('Повторите новый пароль', validators=[\n DataRequired()])\n submit = SubmitField('Сменить пароль')\n\n\n@app.route('/change_password', methods=['GET', 'POST'])\n@login_required\ndef change_password():\n filename = get_profile_img()\n form = ChangePasswordForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = 
db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n if user.hashed_password != form.old_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='Неверный пароль',\n again_password_error='OK', new_password_error='OK',\n filename=filename)\n result = check_password(form.new_password.data)\n if user.hashed_password == form.new_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=\n 'Новый пароль не должен совпадть со старым!', filename=filename\n )\n if result != 'OK':\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=result, filename=filename)\n if form.new_password.data != form.again_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', new_password_error='OK',\n again_password_error='Пароли не совпадают!', filename=filename)\n user.hashed_password = form.new_password.data\n session_in_db.commit()\n return redirect('/profile')\n return render_template('change_password.html', form=form, basket_count=\n session.get('basket_count', 0), title='Сменить пароль', filename=\n filename, old_password_error='OK', again_password_error='OK',\n new_password_error='OK')\n\n\ndef main():\n db_session.global_init('db/blogs.sqlite')\n api.add_resource(product_resource.ProductListResource, '/api/v2/products')\n api.add_resource(product_resource.ProductResource,\n '/api/v2/products/<int:product_id>')\n app.run()\n\n\n<mask token>\n",
"step-4": "from flask import Flask, render_template, redirect, request, session, flash\nfrom data import db_session\nfrom data import users, products\nimport os\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, IntegerField\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import DataRequired, NumberRange\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nimport datetime\nfrom flask_restful import Api\nimport product_resource\nfrom random import shuffle\napp = Flask(__name__)\napi = Api(app)\napp.debug = True\nUPLOAD_FOLDER = f'{os.getcwd()}\\\\static\\\\img\\\\profile_img'\napp.config['SECRET_KEY'] = '12345aA'\napp.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=1)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'\n\n\ndef get_profile_img():\n os.chdir('static\\\\img\\\\profile_img')\n if os.access(f'{current_user.id}.jpg', os.F_OK):\n filename = str(current_user.id)\n elif current_user.gender[0] == 'М':\n filename = 'profilem'\n else:\n filename = 'profilef'\n os.chdir('..\\\\..\\\\..')\n return filename\n\n\ndef find_products(tag):\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n sessions.commit()\n sessions = db_session.create_session()\n all_products = sessions.query(products.Products).all()\n ans_products = list()\n for item in all_products:\n if item.existence and item.still_have == 0:\n item.existence = 0\n elif not item.existence and item.still_have:\n item.existence = 1\n title = item.title.lower()\n if tag in title or title in tag:\n ans_products.append(item)\n return ans_products\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html', error=error)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n return session_in_db.query(users.User).get(user_id)\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Почта', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n remember_me = BooleanField('Запомнить меня')\n submit = SubmitField('Войти')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n if request.method == 'POST':\n session['tag'] = request.form['search']\n return redirect('/')\n all_product = find_products(session.get('tag', '').lower())\n if session.get('reverse', False):\n sim = '▲'\n else:\n sim = '▼'\n simp = simc = simn = simnal = ''\n pos = session.get('sort', 'none')\n if pos == 'price':\n all_product.sort(key=lambda x: x.price, reverse=session.get(\n 'reverse', False))\n simp = sim\n elif pos == 'nal':\n all_product.sort(key=lambda x: x.existence, reverse=session.get(\n 'reverse', False))\n simnal = sim\n elif pos == 'count':\n all_product.sort(key=lambda x: x.still_have, reverse=session.get(\n 'reverse', False))\n simc = sim\n elif pos == 'name':\n simn = sim\n all_product.sort(key=lambda x: x.title, reverse=session.get(\n 'reverse', False))\n else:\n 
shuffle(all_product)\n return render_template('index.html', basket_count=session.get(\n 'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),\n size=len(all_product), filename=filename, product=all_product, simc\n =simc, simn=simn, simp=simp, simnal=simnal)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n session['tag'] = ''\n form = LoginForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).filter(users.User.email ==\n form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\n user.basket.strip().split()]\n bask = list(map(lambda x: [session_in_db.query(products.\n Products).get(x[0]), x[1]], bask))\n session['basket_count'] = len(bask)\n return redirect('/')\n return render_template('login_form.html', message=\n 'Неправильный логин или пароль', form=form)\n return render_template('login_form.html', basket_count=session.get(\n 'basket_count', 0), title='Авторизация', form=form, filename='profilem'\n )\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n session['tag'] = ''\n logout_user()\n return redirect('/')\n\n\nclass RegisterForm(FlaskForm):\n email = EmailField('Email', validators=[DataRequired()])\n password = PasswordField('Пароль', validators=[DataRequired()])\n password_again = PasswordField('Повторите пароль', validators=[\n DataRequired()])\n surname = StringField('Фамилия', validators=[DataRequired()])\n name = StringField('Имя', validators=[DataRequired()])\n mname = StringField('Отчество(при наличии)', validators=[DataRequired()])\n gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',\n 'М'), ('2', 'Ж')])\n age = StringField('Возраст', validators=[DataRequired()])\n submit = SubmitField('Подтвердить')\n\n\nclass LengthError(Exception):\n error = 'Пароль должен состоять не менее чем из 8 символов!'\n\n\nclass SymbolError(Exception):\n error = 'В пароле должен быть хотя бы один символ!'\n\n\nclass LetterError(Exception):\n error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'\n\n\nclass DigitError(Exception):\n error = 'В пароле должна быть хотя бы одна цифра!'\n\n\ndef bool_ys(password):\n ys = [0, 0, 0, 0]\n for i in password:\n if i.isdigit():\n ys[0] = 1\n elif i.isalpha():\n if i.isupper():\n ys[1] = 1\n else:\n ys[2] = 1\n else:\n ys[3] = 1\n if ys[2] * ys[1] == 0:\n raise LetterError\n if ys[0] == 0:\n raise DigitError\n if ys[3] == 0:\n raise SymbolError\n return 'ok'\n\n\ndef check_password(password):\n try:\n if len(password) <= 8:\n raise LengthError\n bool_ys(password)\n return 'OK'\n except (LengthError, SymbolError, LetterError, DigitError) as ex:\n return ex.error\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n result = check_password(form.password.data)\n if result != 'OK':\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n email_error='OK', again_password_error='OK', password_error\n =result)\n if form.password.data != form.password_again.data:\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n email_error='OK', password_error='OK', again_password_error\n ='Пароли не совпадают')\n db_session.global_init('db/blogs.sqlite')\n 
session_in_db = db_session.create_session()\n if session_in_db.query(users.User).filter(users.User.email == form.\n email.data).first():\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form,\n password_error='OK', again_password_error='OK', email_error\n ='Такой пользователь уже есть')\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n user = users.User(name=form.name.data, midname=form.mname.data,\n gender=gen, email=form.email.data, surname=form.surname.data,\n age=form.age.data, hashed_password=form.password.data)\n session_in_db.add(user)\n session_in_db.commit()\n return redirect('/login')\n return render_template('reg.html', basket_count=session.get(\n 'basket_count', 0), title='Регистрация', form=form, filename=\n 'profilem', email_error='OK', password_error='OK',\n again_password_error='OK')\n\n\n@app.route('/profile', methods=['GET', 'POST'])\n@login_required\ndef profile():\n if request.method == 'GET':\n filename = get_profile_img()\n params = {'title': 'Профиль', 'filename': filename, 'id':\n current_user.id, 'name': current_user.name, 'sname':\n current_user.surname, 'mname': current_user.midname, 'gender':\n current_user.gender, 'age': current_user.age, 'basket_count':\n session.get('basket_count', 0)}\n return render_template('profile.html', **params)\n elif request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n file.save(os.path.join(app.config['UPLOAD_FOLDER'],\n f'{current_user.id}.jpg'))\n return redirect('/profile')\n\n\n@app.route('/basket', methods=['GET', 'POST'])\n@login_required\ndef basket():\n sessions = db_session.create_session()\n filename = get_profile_img()\n user = load_user(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]),\n x[1]], bask))\n session['basket_count'] = len(bask)\n return render_template('basket.html', basket_count=session.get(\n 'basket_count', 0), title='Корзина', filename=filename, bask=bask)\n\n\n@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])\ndef delete(product_id, count):\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += count\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n bask = list(filter(lambda x: x[0] != product_id, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/redact_profile', methods=['GET', 'POST'])\n@login_required\ndef redact_profile():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n form = RegisterForm()\n if request.method == 'GET':\n if user.gender == 'Мужской':\n gen = '1'\n else:\n gen = '2'\n form.gender.data = gen\n form.name.data = user.name\n form.mname.data = user.midname\n form.age.data = user.age\n form.surname.data = user.surname\n elif request.method == 'POST':\n if form.gender.data == '1':\n gen = 'Мужской'\n else:\n gen = 'Женский'\n 
user.gender = gen\n user.name = form.name.data\n user.midname = form.mname.data\n user.age = form.age.data\n user.surname = form.surname.data\n session_in_db.commit()\n return redirect('/profile')\n filename = get_profile_img()\n return render_template('redact_profile.html', form=form, filename=\n filename, basket_count=session.get('basket_count', 0), title=\n 'Редактирование')\n\n\nclass Buy(FlaskForm):\n count = IntegerField('Колличество:', validators=[DataRequired(),\n NumberRange(1)], default=1)\n submit = SubmitField('В корзину')\n\n\n@app.route('/product/<int:product_id>', methods=['GET', 'POST'])\ndef product(product_id):\n form = Buy()\n if current_user.is_authenticated:\n filename = get_profile_img()\n else:\n filename = 'profilem'\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n if form.validate_on_submit():\n if current_user.is_authenticated:\n if sessions.query(products.Products).get(product_id\n ).existence and form.count.data <= prod.still_have:\n prod.still_have -= form.count.data\n if prod.still_have == 0:\n prod.existence = 0\n user = sessions.query(users.User).get(current_user.id)\n if user.basket:\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for\n x in user.basket.strip().split()]\n change_product = False\n for item in bask:\n if item[0] == product_id:\n item[1] += form.count.data\n change_product = True\n if not change_product:\n user.basket = (user.basket +\n f'{product_id}-{form.count.data} ')\n else:\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for\n x in bask])\n bask += ' '\n user.basket = bask\n else:\n user.basket = f'{product_id}-{form.count.data} '\n sessions.commit()\n else:\n return render_template('product.html', prod=prod, filename=\n filename, title=prod.title, form=form, basket_count=\n session.get('basket_count', 0), message=\n 'Товара в таком колличестве нет в наличии!')\n else:\n return render_template('product.html', prod=prod, filename=\n filename, basket_count=session.get('basket_count', 0),\n title=prod.title, form=form, message='Вы не авторизованы')\n return redirect('/basket')\n return render_template('product.html', prod=prod, filename=filename,\n basket_count=session.get('basket_count', 0), title=prod.title, form\n =form)\n\n\n@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])\ndef redact_prod_plus(product_id):\n sessions = db_session.create_session()\n prod = sessions.query(products.Products).get(product_id)\n if prod.still_have:\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n for item in bask:\n if item[0] == product_id:\n item[1] += 1\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n prod.still_have -= 1\n sessions.commit()\n return redirect('/basket')\n\n\n@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])\ndef redact_prod_minus(product_id):\n sessions = db_session.create_session()\n user = sessions.query(users.User).get(current_user.id)\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.\n basket.strip().split()]\n for item in bask:\n if item[0] == product_id:\n item[1] -= 1\n bask = list(filter(lambda x: x[1] > 0, bask))\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\n bask += ' '\n user.basket = bask\n prod = sessions.query(products.Products).get(product_id)\n prod.still_have += 1\n sessions.commit()\n return 
redirect('/basket')\n\n\n@app.route('/change/<string:pos>')\ndef change(pos):\n last_pos = session.get('sort', 'none')\n if last_pos == pos:\n session['reverse'] = not session.get('reverse', False)\n else:\n session['reverse'] = False\n session['sort'] = pos\n return redirect('/')\n\n\nclass ChangePasswordForm(FlaskForm):\n old_password = PasswordField('Старый пароль', validators=[DataRequired()])\n new_password = PasswordField('Новый пароль', validators=[DataRequired()])\n again_password = PasswordField('Повторите новый пароль', validators=[\n DataRequired()])\n submit = SubmitField('Сменить пароль')\n\n\n@app.route('/change_password', methods=['GET', 'POST'])\n@login_required\ndef change_password():\n filename = get_profile_img()\n form = ChangePasswordForm()\n if form.validate_on_submit():\n db_session.global_init('db/blogs.sqlite')\n session_in_db = db_session.create_session()\n user = session_in_db.query(users.User).get(current_user.id)\n if user.hashed_password != form.old_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='Неверный пароль',\n again_password_error='OK', new_password_error='OK',\n filename=filename)\n result = check_password(form.new_password.data)\n if user.hashed_password == form.new_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=\n 'Новый пароль не должен совпадть со старым!', filename=filename\n )\n if result != 'OK':\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', again_password_error='OK',\n new_password_error=result, filename=filename)\n if form.new_password.data != form.again_password.data:\n return render_template('change_password.html', basket_count=\n session.get('basket_count', 0), title='Регистрация', form=\n form, old_password_error='OK', new_password_error='OK',\n again_password_error='Пароли не совпадают!', filename=filename)\n user.hashed_password = form.new_password.data\n session_in_db.commit()\n return redirect('/profile')\n return render_template('change_password.html', form=form, basket_count=\n session.get('basket_count', 0), title='Сменить пароль', filename=\n filename, old_password_error='OK', again_password_error='OK',\n new_password_error='OK')\n\n\ndef main():\n db_session.global_init('db/blogs.sqlite')\n api.add_resource(product_resource.ProductListResource, '/api/v2/products')\n api.add_resource(product_resource.ProductResource,\n '/api/v2/products/<int:product_id>')\n app.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from flask import Flask, render_template, redirect, request, session, flash\r\nfrom data import db_session\r\nfrom data import users, products\r\nimport os\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, IntegerField\r\nfrom wtforms.fields.html5 import EmailField\r\nfrom wtforms.validators import DataRequired, NumberRange\r\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\r\nimport datetime\r\nfrom flask_restful import Api\r\nimport product_resource\r\nfrom random import shuffle\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\napp.debug = True\r\n\r\nUPLOAD_FOLDER = f'{os.getcwd()}\\\\static\\\\img\\\\profile_img'\r\n\r\napp.config['SECRET_KEY'] = '12345aA'\r\napp.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=1)\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\n\r\n\r\ndef allowed_file(filename):\r\n return '.' in filename and \\\r\n filename.rsplit('.', 1)[1].lower() == 'jpg'\r\n\r\n\r\ndef get_profile_img():\r\n os.chdir('static\\\\img\\\\profile_img')\r\n if os.access(f'{current_user.id}.jpg', os.F_OK):\r\n filename = str(current_user.id)\r\n else:\r\n if current_user.gender[0] == 'М':\r\n filename = 'profilem'\r\n else:\r\n filename = 'profilef'\r\n os.chdir('..\\\\..\\\\..')\r\n return filename\r\n\r\n\r\ndef find_products(tag):\r\n sessions = db_session.create_session()\r\n all_products = sessions.query(products.Products).all()\r\n for item in all_products:\r\n if item.existence and item.still_have == 0:\r\n item.existence = 0\r\n elif not item.existence and item.still_have:\r\n item.existence = 1\r\n sessions.commit()\r\n sessions = db_session.create_session()\r\n all_products = sessions.query(products.Products).all()\r\n ans_products = list()\r\n for item in all_products:\r\n if item.existence and item.still_have == 0:\r\n item.existence = 0\r\n elif not item.existence and item.still_have:\r\n item.existence = 1\r\n title = item.title.lower()\r\n if tag in title or title in tag:\r\n ans_products.append(item)\r\n return ans_products\r\n\r\n\r\n@app.errorhandler(404)\r\ndef not_found(error):\r\n return render_template('404.html', error=error)\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n db_session.global_init('db/blogs.sqlite')\r\n session_in_db = db_session.create_session()\r\n return session_in_db.query(users.User).get(user_id)\r\n\r\n\r\nclass LoginForm(FlaskForm):\r\n email = EmailField('Почта', validators=[DataRequired()])\r\n password = PasswordField('Пароль', validators=[DataRequired()])\r\n remember_me = BooleanField('Запомнить меня')\r\n submit = SubmitField('Войти')\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n if current_user.is_authenticated:\r\n filename = get_profile_img()\r\n else:\r\n filename = 'profilem'\r\n if request.method == 'POST':\r\n session['tag'] = request.form['search']\r\n return redirect('/')\r\n all_product = find_products(session.get('tag', '').lower())\r\n if session.get('reverse', False):\r\n sim = '▲'\r\n else:\r\n sim = '▼'\r\n simp = simc = simn = simnal = ''\r\n pos = session.get('sort', 'none')\r\n if pos == 'price':\r\n all_product.sort(key=lambda x: x.price, reverse=session.get('reverse', False))\r\n simp = sim\r\n elif pos == 'nal':\r\n all_product.sort(key=lambda x: x.existence, reverse=session.get('reverse', False))\r\n simnal = sim\r\n elif pos == 'count':\r\n 
all_product.sort(key=lambda x: x.still_have, reverse=session.get('reverse', False))\r\n simc = sim\r\n elif pos == 'name':\r\n simn = sim\r\n all_product.sort(key=lambda x: x.title, reverse=session.get('reverse', False))\r\n else:\r\n shuffle(all_product)\r\n return render_template('index.html', basket_count=session.get('basket_count', 0),\r\n title=\"CoolStore\", tag=session.get('tag', ''), size=len(all_product),\r\n filename=filename, product=all_product, simc=simc, simn=simn, simp=simp,\r\n simnal=simnal)\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n session['tag'] = ''\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n db_session.global_init('db/blogs.sqlite')\r\n session_in_db = db_session.create_session()\r\n user = session_in_db.query(users.User).filter(users.User.email == form.email.data).first()\r\n if user and user.check_password(form.password.data):\r\n login_user(user, remember=form.remember_me.data)\r\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\r\n user.basket.strip().split()]\r\n bask = list(\r\n map(lambda x: [session_in_db.query(products.Products).get(x[0]), x[1]], bask))\r\n session['basket_count'] = len(bask)\r\n return redirect(\"/\")\r\n return render_template('login_form.html',\r\n message=\"Неправильный логин или пароль\",\r\n form=form)\r\n return render_template('login_form.html', basket_count=session.get('basket_count', 0),\r\n title='Авторизация', form=form, filename=\"profilem\")\r\n\r\n\r\n@app.route('/logout')\r\n@login_required\r\ndef logout():\r\n session['tag'] = ''\r\n logout_user()\r\n return redirect(\"/\")\r\n\r\n\r\nclass RegisterForm(FlaskForm):\r\n email = EmailField('Email', validators=[DataRequired()])\r\n password = PasswordField('Пароль', validators=[DataRequired()])\r\n password_again = PasswordField('Повторите пароль', validators=[DataRequired()])\r\n surname = StringField('Фамилия', validators=[DataRequired()])\r\n name = StringField('Имя', validators=[DataRequired()])\r\n mname = StringField('Отчество(при наличии)', validators=[DataRequired()])\r\n gender = SelectField(\"Пол\", validators=[DataRequired()], choices=[('1', 'М'), ('2', \"Ж\")])\r\n age = StringField('Возраст', validators=[DataRequired()])\r\n submit = SubmitField('Подтвердить')\r\n\r\n\r\nclass LengthError(Exception):\r\n error = 'Пароль должен состоять не менее чем из 8 символов!'\r\n\r\n\r\nclass SymbolError(Exception):\r\n error = 'В пароле должен быть хотя бы один символ!'\r\n\r\n\r\nclass LetterError(Exception):\r\n error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'\r\n\r\n\r\nclass DigitError(Exception):\r\n error = 'В пароле должна быть хотя бы одна цифра!'\r\n\r\n\r\ndef bool_ys(password):\r\n ys = [0, 0, 0, 0]\r\n for i in password:\r\n if i.isdigit():\r\n ys[0] = 1\r\n elif i.isalpha():\r\n if i.isupper():\r\n ys[1] = 1\r\n else:\r\n ys[2] = 1\r\n else:\r\n ys[3] = 1\r\n if ys[2] * ys[1] == 0:\r\n raise LetterError\r\n if ys[0] == 0:\r\n raise DigitError\r\n if ys[3] == 0:\r\n raise SymbolError\r\n return 'ok'\r\n\r\n\r\ndef check_password(password):\r\n try:\r\n if len(password) <= 8:\r\n raise LengthError\r\n bool_ys(password)\r\n return 'OK'\r\n except (LengthError, SymbolError, LetterError, DigitError) as ex:\r\n return ex.error\r\n\r\n\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef reqister():\r\n form = RegisterForm()\r\n if form.validate_on_submit():\r\n result = check_password(form.password.data)\r\n if result != 'OK':\r\n return render_template('reg.html', 
basket_count=session.get('basket_count', 0),\r\n title='Регистрация',\r\n form=form, email_error=\"OK\", again_password_error=\"OK\",\r\n password_error=result)\r\n if form.password.data != form.password_again.data:\r\n return render_template('reg.html', basket_count=session.get('basket_count', 0),\r\n title='Регистрация',\r\n form=form, email_error=\"OK\", password_error=\"OK\",\r\n again_password_error=\"Пароли не совпадают\")\r\n db_session.global_init('db/blogs.sqlite')\r\n session_in_db = db_session.create_session()\r\n if session_in_db.query(users.User).filter(users.User.email == form.email.data).first():\r\n return render_template('reg.html', basket_count=session.get('basket_count', 0),\r\n title='Регистрация',\r\n form=form, password_error=\"OK\", again_password_error=\"OK\",\r\n email_error=\"Такой пользователь уже есть\")\r\n if form.gender.data == '1':\r\n gen = \"Мужской\"\r\n else:\r\n gen = \"Женский\"\r\n user = users.User(\r\n name=form.name.data,\r\n midname=form.mname.data,\r\n gender=gen,\r\n email=form.email.data,\r\n surname=form.surname.data,\r\n age=form.age.data,\r\n hashed_password=form.password.data\r\n )\r\n session_in_db.add(user)\r\n session_in_db.commit()\r\n return redirect('/login')\r\n return render_template('reg.html', basket_count=session.get('basket_count', 0),\r\n title='Регистрация', form=form, filename=\"profilem\",\r\n email_error=\"OK\", password_error=\"OK\", again_password_error=\"OK\")\r\n\r\n\r\n@app.route('/profile', methods=['GET', 'POST'])\r\n@login_required\r\ndef profile():\r\n if request.method == 'GET':\r\n filename = get_profile_img()\r\n params = {\r\n 'title': 'Профиль',\r\n 'filename': filename,\r\n 'id': current_user.id,\r\n 'name': current_user.name,\r\n 'sname': current_user.surname,\r\n 'mname': current_user.midname,\r\n 'gender': current_user.gender,\r\n 'age': current_user.age,\r\n 'basket_count': session.get('basket_count', 0)\r\n }\r\n return render_template('profile.html', **params)\r\n elif request.method == 'POST':\r\n if 'file' not in request.files:\r\n flash('No file part')\r\n return redirect(request.url)\r\n file = request.files['file']\r\n if file.filename == '':\r\n flash('No selected file')\r\n return redirect(request.url)\r\n if file and allowed_file(file.filename):\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], f'{current_user.id}.jpg'))\r\n return redirect('/profile')\r\n\r\n\r\n@app.route('/basket', methods=['GET', 'POST'])\r\n@login_required\r\ndef basket():\r\n sessions = db_session.create_session()\r\n filename = get_profile_img()\r\n user = load_user(current_user.id)\r\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.basket.strip().split()]\r\n bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]), x[1]], bask))\r\n session['basket_count'] = len(bask)\r\n return render_template('basket.html', basket_count=session.get('basket_count', 0),\r\n title='Корзина', filename=filename, bask=bask)\r\n\r\n\r\n@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])\r\ndef delete(product_id, count):\r\n sessions = db_session.create_session()\r\n prod = sessions.query(products.Products).get(product_id)\r\n prod.still_have += count\r\n user = sessions.query(users.User).get(current_user.id)\r\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.basket.strip().split()]\r\n bask = list(filter(lambda x: x[0] != product_id, bask))\r\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\r\n bask += ' '\r\n user.basket = bask\r\n 
sessions.commit()\r\n return redirect('/basket')\r\n\r\n\r\n@app.route('/redact_profile', methods=['GET', 'POST'])\r\n@login_required\r\ndef redact_profile():\r\n db_session.global_init('db/blogs.sqlite')\r\n session_in_db = db_session.create_session()\r\n user = session_in_db.query(users.User).get(current_user.id)\r\n form = RegisterForm()\r\n if request.method == 'GET':\r\n if user.gender == 'Мужской':\r\n gen = '1'\r\n else:\r\n gen = '2'\r\n form.gender.data = gen\r\n form.name.data = user.name\r\n form.mname.data = user.midname\r\n form.age.data = user.age\r\n form.surname.data = user.surname\r\n elif request.method == 'POST':\r\n if form.gender.data == '1':\r\n gen = \"Мужской\"\r\n else:\r\n gen = \"Женский\"\r\n user.gender = gen\r\n user.name = form.name.data\r\n user.midname = form.mname.data\r\n user.age = form.age.data\r\n user.surname = form.surname.data\r\n session_in_db.commit()\r\n return redirect('/profile')\r\n filename = get_profile_img()\r\n return render_template('redact_profile.html', form=form, filename=filename,\r\n basket_count=session.get('basket_count', 0), title='Редактирование')\r\n\r\n\r\nclass Buy(FlaskForm):\r\n count = IntegerField('Колличество:', validators=[DataRequired(), NumberRange(1)],\r\n default=1)\r\n submit = SubmitField('В корзину')\r\n\r\n\r\n@app.route('/product/<int:product_id>', methods=['GET', 'POST'])\r\ndef product(product_id):\r\n form = Buy()\r\n if current_user.is_authenticated:\r\n filename = get_profile_img()\r\n else:\r\n filename = 'profilem'\r\n sessions = db_session.create_session()\r\n prod = sessions.query(products.Products).get(product_id)\r\n if form.validate_on_submit():\r\n if current_user.is_authenticated:\r\n if sessions.query(products.Products).get(product_id).existence and \\\r\n form.count.data <= prod.still_have:\r\n prod.still_have -= form.count.data\r\n if prod.still_have == 0:\r\n prod.existence = 0\r\n user = sessions.query(users.User).get(current_user.id)\r\n if user.basket:\r\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\r\n user.basket.strip().split()]\r\n change_product = False\r\n for item in bask:\r\n if item[0] == product_id:\r\n item[1] += form.count.data\r\n change_product = True\r\n if not change_product:\r\n user.basket = user.basket + f'{product_id}-{form.count.data} '\r\n else:\r\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\r\n bask += ' '\r\n user.basket = bask\r\n else:\r\n user.basket = f'{product_id}-{form.count.data} '\r\n sessions.commit()\r\n else:\r\n return render_template('product.html', prod=prod, filename=filename,\r\n title=prod.title, form=form,\r\n basket_count=session.get('basket_count', 0),\r\n message='Товара в таком колличестве нет в наличии!')\r\n else:\r\n return render_template('product.html', prod=prod, filename=filename,\r\n basket_count=session.get('basket_count', 0), title=prod.title,\r\n form=form, message='Вы не авторизованы')\r\n return redirect('/basket')\r\n return render_template('product.html', prod=prod, filename=filename,\r\n basket_count=session.get('basket_count', 0), title=prod.title,\r\n form=form)\r\n\r\n\r\n@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])\r\ndef redact_prod_plus(product_id):\r\n sessions = db_session.create_session()\r\n prod = sessions.query(products.Products).get(product_id)\r\n if prod.still_have:\r\n user = sessions.query(users.User).get(current_user.id)\r\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\r\n user.basket.strip().split()]\r\n for item in bask:\r\n 
if item[0] == product_id:\r\n item[1] += 1\r\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\r\n bask += ' '\r\n user.basket = bask\r\n prod.still_have -= 1\r\n sessions.commit()\r\n return redirect('/basket')\r\n\r\n\r\n@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])\r\ndef redact_prod_minus(product_id):\r\n sessions = db_session.create_session()\r\n user = sessions.query(users.User).get(current_user.id)\r\n bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in\r\n user.basket.strip().split()]\r\n for item in bask:\r\n if item[0] == product_id:\r\n item[1] -= 1\r\n bask = list(filter(lambda x: x[1] > 0, bask))\r\n bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])\r\n bask += ' '\r\n user.basket = bask\r\n prod = sessions.query(products.Products).get(product_id)\r\n prod.still_have += 1\r\n sessions.commit()\r\n return redirect('/basket')\r\n\r\n\r\n@app.route('/change/<string:pos>')\r\ndef change(pos):\r\n last_pos = session.get('sort', 'none')\r\n if last_pos == pos:\r\n session['reverse'] = not session.get('reverse', False)\r\n else:\r\n session['reverse'] = False\r\n session['sort'] = pos\r\n return redirect('/')\r\n\r\n\r\nclass ChangePasswordForm(FlaskForm):\r\n old_password = PasswordField('Старый пароль', validators=[DataRequired()])\r\n new_password = PasswordField('Новый пароль', validators=[DataRequired()])\r\n again_password = PasswordField('Повторите новый пароль', validators=[DataRequired()])\r\n submit = SubmitField('Сменить пароль')\r\n\r\n\r\n@app.route('/change_password', methods=['GET', \"POST\"])\r\n@login_required\r\ndef change_password():\r\n filename = get_profile_img()\r\n form = ChangePasswordForm()\r\n if form.validate_on_submit():\r\n db_session.global_init('db/blogs.sqlite')\r\n session_in_db = db_session.create_session()\r\n user = session_in_db.query(users.User).get(current_user.id)\r\n if user.hashed_password != form.old_password.data:\r\n return render_template('change_password.html',\r\n basket_count=session.get('basket_count', 0), title='Регистрация',\r\n form=form, old_password_error=\"Неверный пароль\",\r\n again_password_error=\"OK\", new_password_error=\"OK\",\r\n filename=filename)\r\n result = check_password(form.new_password.data)\r\n if user.hashed_password == form.new_password.data:\r\n return render_template('change_password.html',\r\n basket_count=session.get('basket_count', 0), title='Регистрация',\r\n form=form, old_password_error=\"OK\", again_password_error=\"OK\",\r\n new_password_error=\"Новый пароль не должен совпадть со старым!\",\r\n filename=filename)\r\n if result != 'OK':\r\n return render_template('change_password.html',\r\n basket_count=session.get('basket_count', 0), title='Регистрация',\r\n form=form, old_password_error=\"OK\", again_password_error=\"OK\",\r\n new_password_error=result, filename=filename)\r\n if form.new_password.data != form.again_password.data:\r\n return render_template('change_password.html',\r\n basket_count=session.get('basket_count', 0), title='Регистрация',\r\n form=form, old_password_error=\"OK\", new_password_error=\"OK\",\r\n again_password_error=\"Пароли не совпадают!\", filename=filename)\r\n user.hashed_password = form.new_password.data\r\n session_in_db.commit()\r\n return redirect('/profile')\r\n return render_template('change_password.html', form=form,\r\n basket_count=session.get('basket_count', 0), title=\"Сменить пароль\",\r\n filename=filename, old_password_error=\"OK\", again_password_error=\"OK\",\r\n 
new_password_error=\"OK\")\r\n\r\n\r\ndef main():\r\n db_session.global_init(\"db/blogs.sqlite\")\r\n api.add_resource(product_resource.ProductListResource, '/api/v2/products')\r\n api.add_resource(product_resource.ProductResource, '/api/v2/products/<int:product_id>')\r\n app.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n",
"step-ids": [
30,
35,
37,
40,
41
]
}
|
[
30,
35,
37,
40,
41
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# In this file, __unicode__ is used instead of __str__ to avoid errors when
# Chinese text is displayed in the admin interface.
# Reference: http://blog.csdn.net/jiangnanandi/article/details/3574007
# Another possible solution: http://blog.sina.com.cn/s/blog_63cf1c510101an74.html
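# Note: a Python 2/3-compatible alternative (assuming Django >= 1.5, where the
# decorator was introduced) is to define only __str__ and let Django derive
# __unicode__; "Example" below is purely illustrative:
#
#     from django.utils.encoding import python_2_unicode_compatible
#
#     @python_2_unicode_compatible
#     class Example(models.Model):
#         def __str__(self):
#             return self.title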
class FatherMenu(models.Model):
    title = models.CharField(u"菜单名", max_length=20)
    slug = models.CharField(u"链接", max_length=100, db_index=True)
    son = models.BooleanField(u"子菜单?", default=False)

    class Meta:
        verbose_name = u"一级菜单"
        verbose_name_plural = u"一级菜单"

    def __unicode__(self):
        return self.title


class SonMenu(models.Model):
    title = models.CharField(u"菜单名", max_length=20)
    slug = models.CharField(u"链接", max_length=100, db_index=True)
    father = models.ForeignKey(
        'seclab.FatherMenu', blank=True, null=True, verbose_name=u"父菜单")

    class Meta:
        verbose_name = u"二级菜单"
        verbose_name_plural = u"二级菜单"

    def __unicode__(self):
        return self.title


class Img(models.Model):
    tag = models.CharField(u"类型", max_length=20)
    tagId = models.IntegerField(u"序号")
    intro = models.CharField(u"描述", max_length=100)
    title = models.CharField(u"标题", max_length=100)
    slug = models.CharField(u"链接", max_length=100, db_index=True)

    class Meta:
        verbose_name = u"图片"
        verbose_name_plural = u"图片"

    def __unicode__(self):
        return self.slug


class Article(models.Model):
    tag = models.CharField(u"类型", max_length=20)
    title = models.CharField(u"标题", max_length=100)
    content = models.TextField(u"内容", default=u'', blank=True)
    author = models.CharField(u"作者", max_length=100)
    # Note: auto_now_add=True forces editable=False, so the explicit
    # editable=True on the next line is silently discarded by Django.
    pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)
    home_display = models.BooleanField(u"首页显示", default=False)

    class Meta:
        verbose_name = u"文章"
        verbose_name_plural = u"文章"

    def __unicode__(self):
        return self.title
|
normal
|
{
"blob_id": "49b007b723b9c43fb79d5dffa2546c856faf4937",
"index": 8625,
"step-1": "<mask token>\n\n\nclass SonMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n <mask token>\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-2": "<mask token>\n\n\nclass SonMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-3": "<mask token>\n\n\nclass FatherMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'一级菜单'\n verbose_name_plural = u'一级菜单'\n <mask token>\n\n\nclass SonMenu(models.Model):\n title = models.CharField(u'菜单名', max_length=20)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n father = models.ForeignKey('seclab.FatherMenu', blank=True, null=True,\n verbose_name=u'父菜单')\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-4": "<mask token>\n\n\nclass FatherMenu(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = u'一级菜单'\n verbose_name_plural = u'一级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass SonMenu(models.Model):\n title = models.CharField(u'菜单名', max_length=20)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n father = models.ForeignKey('seclab.FatherMenu', blank=True, null=True,\n verbose_name=u'父菜单')\n\n\n class Meta:\n verbose_name = u'二级菜单'\n verbose_name_plural = u'二级菜单'\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n tagId = models.IntegerField(u'序号')\n intro = models.CharField(u'描述', max_length=100)\n title = models.CharField(u'标题', max_length=100)\n slug = models.CharField(u'链接', max_length=100, db_index=True)\n\n\n class Meta:\n verbose_name = u'图片'\n verbose_name_plural = u'图片'\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u'类型', max_length=20)\n title = models.CharField(u'标题', max_length=100)\n content = models.TextField(u'内容', default=u'', blank=True)\n author = models.CharField(u'作者', max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u'首页显示', default=False)\n\n\n class Meta:\n verbose_name = u'文章'\n verbose_name_plural = u'文章'\n\n def __unicode__(self):\n return self.title\n",
"step-5": "# _*_ coding:utf-8 _*_\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.core.urlresolvers import reverse \n# Create your models here.\n\n\n# 本文件中,用__unicode__代替了__str__,以免在admin界面中显示中文而引发错误。\n# 参考:http://blog.csdn.net/jiangnanandi/article/details/3574007\n# 或者另一个解决方案:http://blog.sina.com.cn/s/blog_63cf1c510101an74.html\n\n\nclass FatherMenu(models.Model):\n\n title = models.CharField(u\"菜单名\", max_length=20)\n slug = models.CharField(u\"链接\", max_length=100, db_index=True)\n son = models.BooleanField(\"子菜单?\", default=False)\n\n class Meta:\n verbose_name = u\"一级菜单\"\n verbose_name_plural = u\"一级菜单\"\n\n def __unicode__(self):\n return self.title\n\n\nclass SonMenu(models.Model):\n\n title = models.CharField(u\"菜单名\", max_length=20)\n slug = models.CharField(u\"链接\", max_length=100, db_index=True)\n father = models.ForeignKey(\n 'seclab.FatherMenu', blank=True, null=True, verbose_name=u\"父菜单\")\n\n class Meta:\n verbose_name = u\"二级菜单\"\n verbose_name_plural = u\"二级菜单\"\n\n def __unicode__(self):\n return self.title\n\n\nclass Img(models.Model):\n tag = models.CharField(u\"类型\", max_length=20)\n tagId = models.IntegerField(u\"序号\")\n intro = models.CharField(u\"描述\", max_length=100)\n title = models.CharField(u\"标题\", max_length=100)\n slug = models.CharField(u\"链接\", max_length=100, db_index=True)\n\n class Meta:\n verbose_name = u\"图片\"\n verbose_name_plural = u\"图片\"\n\n def __unicode__(self):\n return self.slug\n\n\nclass Article(models.Model):\n tag = models.CharField(u\"类型\", max_length=20)\n title = models.CharField(u\"标题\", max_length=100)\n content = models.TextField(u\"内容\", default=u'', blank=True)\n author = models.CharField(u\"作者\", max_length=100)\n pub_date = models.DateField(u'发表日期', auto_now_add=True, editable=True)\n home_display = models.BooleanField(u\"首页显示\", default=False)\n\n class Meta:\n verbose_name = u\"文章\"\n verbose_name_plural = u\"文章\"\n\n def __unicode__(self):\n return self.title\n",
"step-ids": [
7,
8,
10,
11,
14
]
}
|
[
7,
8,
10,
11,
14
] |
<|reserved_special_token_0|>
def readOnePolicy(path2):
ethic_set = wn.synsets('ethic')
standard_set = wn.synsets('standard')
privacy_set = wn.synsets('privacy')
education_set = wn.synsets('education')
investment_set = wn.synsets('investment')
application_set = wn.synsets('application')
content = ''
with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:
content = fr.read()
content = content.split()
stop_words = ''
with open('stopWords.txt', 'r') as f2:
stop_words = f2.read()
stop_words = stop_words.split()
ethic_max_prob = 0
standard_max_prob = 0
privacy_max_prob = 0
education_max_prob = 0
investment_max_prob = 0
application_max_prob = 0
for i in range(len(content)):
contentSyns = []
if content[i] not in stop_words:
if not content[i].isnumeric():
contentSyns = wn.synsets(content[i])
if len(contentSyns) > 0:
ethic_prob = max([(0 if e.path_similarity(c) == None else
e.path_similarity(c)) for e in ethic_set for c in
contentSyns])
standard_prob = max([(0 if s.path_similarity(c) == None
else s.path_similarity(c)) for s in standard_set for
c in contentSyns])
privacy_prob = max([(0 if p.path_similarity(c) == None else
p.path_similarity(c)) for p in privacy_set for c in
contentSyns])
education_prob = max([(0 if edu.path_similarity(c) ==
None else edu.path_similarity(c)) for edu in
education_set for c in contentSyns])
investment_prob = max([(0 if i.path_similarity(c) ==
None else i.path_similarity(c)) for i in
investment_set for c in contentSyns])
application_prob = max([(0 if a.path_similarity(c) ==
None else a.path_similarity(c)) for a in
application_set for c in contentSyns])
if ethic_prob > ethic_max_prob:
ethic_max_prob = ethic_prob
if standard_prob > standard_max_prob:
standard_max_prob = standard_prob
if privacy_prob > privacy_max_prob:
privacy_max_prob = privacy_prob
if education_prob > education_max_prob:
education_max_prob = education_prob
if investment_prob > investment_max_prob:
investment_max_prob = investment_prob
if application_prob > application_max_prob:
application_max_prob = application_prob
print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',
privacy_max_prob, ' ', education_max_prob, ' ',
investment_max_prob, ' ', application_max_prob)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readOnePolicy(path2):
ethic_set = wn.synsets('ethic')
standard_set = wn.synsets('standard')
privacy_set = wn.synsets('privacy')
education_set = wn.synsets('education')
investment_set = wn.synsets('investment')
application_set = wn.synsets('application')
content = ''
with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:
content = fr.read()
content = content.split()
stop_words = ''
with open('stopWords.txt', 'r') as f2:
stop_words = f2.read()
stop_words = stop_words.split()
ethic_max_prob = 0
standard_max_prob = 0
privacy_max_prob = 0
education_max_prob = 0
investment_max_prob = 0
application_max_prob = 0
for i in range(len(content)):
contentSyns = []
if content[i] not in stop_words:
if not content[i].isnumeric():
contentSyns = wn.synsets(content[i])
if len(contentSyns) > 0:
ethic_prob = max([(0 if e.path_similarity(c) == None else
e.path_similarity(c)) for e in ethic_set for c in
contentSyns])
standard_prob = max([(0 if s.path_similarity(c) == None
else s.path_similarity(c)) for s in standard_set for
c in contentSyns])
privacy_prob = max([(0 if p.path_similarity(c) == None else
p.path_similarity(c)) for p in privacy_set for c in
contentSyns])
education_prob = max([(0 if edu.path_similarity(c) ==
None else edu.path_similarity(c)) for edu in
education_set for c in contentSyns])
investment_prob = max([(0 if i.path_similarity(c) ==
None else i.path_similarity(c)) for i in
investment_set for c in contentSyns])
application_prob = max([(0 if a.path_similarity(c) ==
None else a.path_similarity(c)) for a in
application_set for c in contentSyns])
if ethic_prob > ethic_max_prob:
ethic_max_prob = ethic_prob
if standard_prob > standard_max_prob:
standard_max_prob = standard_prob
if privacy_prob > privacy_max_prob:
privacy_max_prob = privacy_prob
if education_prob > education_max_prob:
education_max_prob = education_prob
if investment_prob > investment_max_prob:
investment_max_prob = investment_prob
if application_prob > application_max_prob:
application_max_prob = application_prob
print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',
privacy_max_prob, ' ', education_max_prob, ' ',
investment_max_prob, ' ', application_max_prob)
<|reserved_special_token_0|>
for root, dirs, files in os.walk(file_dir):
for f in range(len(files)):
path1 = os.path.join(file_dir, files[f])
readOnePolicy(path1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readOnePolicy(path2):
ethic_set = wn.synsets('ethic')
standard_set = wn.synsets('standard')
privacy_set = wn.synsets('privacy')
education_set = wn.synsets('education')
investment_set = wn.synsets('investment')
application_set = wn.synsets('application')
content = ''
with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:
content = fr.read()
content = content.split()
stop_words = ''
with open('stopWords.txt', 'r') as f2:
stop_words = f2.read()
stop_words = stop_words.split()
ethic_max_prob = 0
standard_max_prob = 0
privacy_max_prob = 0
education_max_prob = 0
investment_max_prob = 0
application_max_prob = 0
for i in range(len(content)):
contentSyns = []
if content[i] not in stop_words:
if not content[i].isnumeric():
contentSyns = wn.synsets(content[i])
if len(contentSyns) > 0:
ethic_prob = max([(0 if e.path_similarity(c) == None else
e.path_similarity(c)) for e in ethic_set for c in
contentSyns])
standard_prob = max([(0 if s.path_similarity(c) == None
else s.path_similarity(c)) for s in standard_set for
c in contentSyns])
privacy_prob = max([(0 if p.path_similarity(c) == None else
p.path_similarity(c)) for p in privacy_set for c in
contentSyns])
education_prob = max([(0 if edu.path_similarity(c) ==
None else edu.path_similarity(c)) for edu in
education_set for c in contentSyns])
investment_prob = max([(0 if i.path_similarity(c) ==
None else i.path_similarity(c)) for i in
investment_set for c in contentSyns])
application_prob = max([(0 if a.path_similarity(c) ==
None else a.path_similarity(c)) for a in
application_set for c in contentSyns])
if ethic_prob > ethic_max_prob:
ethic_max_prob = ethic_prob
if standard_prob > standard_max_prob:
standard_max_prob = standard_prob
if privacy_prob > privacy_max_prob:
privacy_max_prob = privacy_prob
if education_prob > education_max_prob:
education_max_prob = education_prob
if investment_prob > investment_max_prob:
investment_max_prob = investment_prob
if application_prob > application_max_prob:
application_max_prob = application_prob
print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',
privacy_max_prob, ' ', education_max_prob, ' ',
investment_max_prob, ' ', application_max_prob)
file_dir = 'txt'
for root, dirs, files in os.walk(file_dir):
for f in range(len(files)):
path1 = os.path.join(file_dir, files[f])
readOnePolicy(path1)
<|reserved_special_token_1|>
from nltk.corpus import wordnet as wn
import os
import codecs
def readOnePolicy(path2):
ethic_set = wn.synsets('ethic')
standard_set = wn.synsets('standard')
privacy_set = wn.synsets('privacy')
education_set = wn.synsets('education')
investment_set = wn.synsets('investment')
application_set = wn.synsets('application')
content = ''
with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:
content = fr.read()
content = content.split()
stop_words = ''
with open('stopWords.txt', 'r') as f2:
stop_words = f2.read()
stop_words = stop_words.split()
ethic_max_prob = 0
standard_max_prob = 0
privacy_max_prob = 0
education_max_prob = 0
investment_max_prob = 0
application_max_prob = 0
for i in range(len(content)):
contentSyns = []
if content[i] not in stop_words:
if not content[i].isnumeric():
contentSyns = wn.synsets(content[i])
if len(contentSyns) > 0:
ethic_prob = max([(0 if e.path_similarity(c) == None else
e.path_similarity(c)) for e in ethic_set for c in
contentSyns])
standard_prob = max([(0 if s.path_similarity(c) == None
else s.path_similarity(c)) for s in standard_set for
c in contentSyns])
privacy_prob = max([(0 if p.path_similarity(c) == None else
p.path_similarity(c)) for p in privacy_set for c in
contentSyns])
education_prob = max([(0 if edu.path_similarity(c) ==
None else edu.path_similarity(c)) for edu in
education_set for c in contentSyns])
investment_prob = max([(0 if i.path_similarity(c) ==
None else i.path_similarity(c)) for i in
investment_set for c in contentSyns])
application_prob = max([(0 if a.path_similarity(c) ==
None else a.path_similarity(c)) for a in
application_set for c in contentSyns])
if ethic_prob > ethic_max_prob:
ethic_max_prob = ethic_prob
if standard_prob > standard_max_prob:
standard_max_prob = standard_prob
if privacy_prob > privacy_max_prob:
privacy_max_prob = privacy_prob
if education_prob > education_max_prob:
education_max_prob = education_prob
if investment_prob > investment_max_prob:
investment_max_prob = investment_prob
if application_prob > application_max_prob:
application_max_prob = application_prob
print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',
privacy_max_prob, ' ', education_max_prob, ' ',
investment_max_prob, ' ', application_max_prob)
file_dir = 'txt'
for root, dirs, files in os.walk(file_dir):
for f in range(len(files)):
path1 = os.path.join(file_dir, files[f])
readOnePolicy(path1)
<|reserved_special_token_1|>
##################### Extract the content from policy files: standards, ethics, ... 3 categories of content ##########################
########### step 1: find synonyms for the 3 categories of content and build a word list ######
########### step 2: match each file against the word list to decide what the file is actually about ######
from nltk.corpus import wordnet as wn
import os
import codecs
# goods = wn.synsets('beautiful')
# beautifuls = wn.synsets('pretty')
# bads = wn.synsets('standard')
# print('semantic similarity of good and bad: ', max([0 if good.path_similarity(bad) == None else good.path_similarity(bad) for good in goods for bad in bads]))
def readOnePolicy(path2):
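    # For one policy file: for each of six topic synset groups, track the maximum
    # WordNet path similarity between any document token and any synset in the group.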
ethic_set = wn.synsets('ethic')
    # print('synsets of ethic:', ethic_set)
    # print('words contained in each synset of ethic:', [ethic.lemma_names() for ethic in ethic_set])
    # print('definition of each synset of ethic:', [dog.definition() for dog in ethic_set])
    # print('examples of each synset of ethic:', [dog.examples() for dog in ethic_set])
standard_set = wn.synsets('standard')
privacy_set = wn.synsets('privacy')
education_set = wn.synsets('education')
investment_set = wn.synsets('investment')
application_set = wn.synsets('application')
content=''
# with open(path2,'r',encoding='UTF-8') as f1:
# with open(path2, 'r', encoding='UTF-8') as f1:
    with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:  ### codecs is used here to guard against encoding errors
content=fr.read()
content=content.split()
# print(type(content))
# content = wn.synsets('standard')
    # print('semantic similarity of good and beautiful: ', max([0 if one_ethic.path_similarity(one_word) == None else one_ethic.path_similarity(one_word) for one_ethic in ethic_set for one_word in content]))
#
# for ethic in ethic_set:
# # print(type(ethic.lemma_names()))##list
# for one_word in range(len(ethic.lemma_names())):
# print(ethic.lemma_names()[one_word])
    # print('semantic similarity of content and ethic: ', max([0 if good.path_similarity(beautiful) == None else good.path_similarity(beautiful) for good in goods for beautiful in beautifuls]))
stop_words=''
with open('stopWords.txt','r') as f2:
stop_words=f2.read()
stop_words=stop_words.split()
ethic_max_prob = 0
standard_max_prob = 0
privacy_max_prob = 0
education_max_prob = 0
investment_max_prob = 0
application_max_prob = 0
for i in range(len(content)):
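        # consider only tokens that are neither stop words nor pure numbers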
contentSyns=[]
if content[i] not in stop_words:
if not content[i].isnumeric():
# print(content[i],' content[i]')
contentSyns=wn.synsets(content[i])
                # print(contentSyns,' contentsyns')  ### some contentSyns are empty [], and max() below would raise on them
if len(contentSyns)>0:
ethic_prob=max([0 if e.path_similarity(c) == None else e.path_similarity(c) for e in ethic_set for c in contentSyns])
standard_prob = max([0 if s.path_similarity(c) == None else s.path_similarity(c) for s in standard_set for c in contentSyns])
privacy_prob = max([0 if p.path_similarity(c) == None else p.path_similarity(c) for p in privacy_set for c in contentSyns])
education_prob = max([0 if edu.path_similarity(c) == None else edu.path_similarity(c) for edu in education_set for c in contentSyns])
investment_prob = max([0 if i.path_similarity(c) == None else i.path_similarity(c) for i in investment_set for c in contentSyns])
application_prob = max([0 if a.path_similarity(c) == None else a.path_similarity(c) for a in application_set for c in contentSyns])
if ethic_prob>ethic_max_prob:
ethic_max_prob=ethic_prob
if standard_prob>standard_max_prob:
standard_max_prob=standard_prob
if privacy_prob>privacy_max_prob:
privacy_max_prob=privacy_prob
if education_prob > education_max_prob:
education_max_prob = education_prob
if investment_prob > investment_max_prob:
investment_max_prob = investment_prob
if application_prob > application_max_prob:
application_max_prob = application_prob
            # print(max_prob, ' probability')
# print(ethic_max_prob,' ethic_max_prob')
# print(standard_max_prob,' standard_max_prob')
# print(privacy_max_prob,' privacy_max_prob')
print(path2,' ',ethic_max_prob,' ',standard_max_prob,' ',privacy_max_prob,' ',education_max_prob,' ',investment_max_prob,' ',application_max_prob)
file_dir = r"txt"
for root, dirs, files in os.walk(file_dir):
for f in range(len(files)):
path1=os.path.join(file_dir,files[f])
# print(path1,' doc_name')
readOnePolicy(path1)
# with open(path1, 'r') as f1:
# content = f1.read()
|
flexible
|
{
"blob_id": "caca4309034f08874e1e32828a601e7e3d4d3efd",
"index": 2058,
"step-1": "<mask token>\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\n<mask token>\nfor root, dirs, files in os.walk(file_dir):\n for f in range(len(files)):\n path1 = os.path.join(file_dir, files[f])\n readOnePolicy(path1)\n",
"step-3": "<mask token>\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\nfile_dir = 'txt'\nfor root, dirs, files in os.walk(file_dir):\n for f in range(len(files)):\n path1 = os.path.join(file_dir, files[f])\n readOnePolicy(path1)\n",
"step-4": "from nltk.corpus import wordnet as wn\nimport os\nimport codecs\n\n\ndef readOnePolicy(path2):\n ethic_set = wn.synsets('ethic')\n standard_set = wn.synsets('standard')\n privacy_set = wn.synsets('privacy')\n education_set = wn.synsets('education')\n investment_set = wn.synsets('investment')\n application_set = wn.synsets('application')\n content = ''\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:\n content = fr.read()\n content = content.split()\n stop_words = ''\n with open('stopWords.txt', 'r') as f2:\n stop_words = f2.read()\n stop_words = stop_words.split()\n ethic_max_prob = 0\n standard_max_prob = 0\n privacy_max_prob = 0\n education_max_prob = 0\n investment_max_prob = 0\n application_max_prob = 0\n for i in range(len(content)):\n contentSyns = []\n if content[i] not in stop_words:\n if not content[i].isnumeric():\n contentSyns = wn.synsets(content[i])\n if len(contentSyns) > 0:\n ethic_prob = max([(0 if e.path_similarity(c) == None else\n e.path_similarity(c)) for e in ethic_set for c in\n contentSyns])\n standard_prob = max([(0 if s.path_similarity(c) == None\n else s.path_similarity(c)) for s in standard_set for\n c in contentSyns])\n privacy_prob = max([(0 if p.path_similarity(c) == None else\n p.path_similarity(c)) for p in privacy_set for c in\n contentSyns])\n education_prob = max([(0 if edu.path_similarity(c) ==\n None else edu.path_similarity(c)) for edu in\n education_set for c in contentSyns])\n investment_prob = max([(0 if i.path_similarity(c) ==\n None else i.path_similarity(c)) for i in\n investment_set for c in contentSyns])\n application_prob = max([(0 if a.path_similarity(c) ==\n None else a.path_similarity(c)) for a in\n application_set for c in contentSyns])\n if ethic_prob > ethic_max_prob:\n ethic_max_prob = ethic_prob\n if standard_prob > standard_max_prob:\n standard_max_prob = standard_prob\n if privacy_prob > privacy_max_prob:\n privacy_max_prob = privacy_prob\n if education_prob > education_max_prob:\n education_max_prob = education_prob\n if investment_prob > investment_max_prob:\n investment_max_prob = investment_prob\n if application_prob > application_max_prob:\n application_max_prob = application_prob\n print(path2, ' ', ethic_max_prob, ' ', standard_max_prob, ' ',\n privacy_max_prob, ' ', education_max_prob, ' ',\n investment_max_prob, ' ', application_max_prob)\n\n\nfile_dir = 'txt'\nfor root, dirs, files in os.walk(file_dir):\n for f in range(len(files)):\n path1 = os.path.join(file_dir, files[f])\n readOnePolicy(path1)\n",
"step-5": "#####################将政策文件中的内容抽取出来:标准、伦理、 3部分内容##########################\r\n###########step 1:把3部分内容找到近义词,组成一个词表######\r\n###########step 2:把文件与词表相匹配,判断文件到底在讲啥######\r\nfrom nltk.corpus import wordnet as wn\r\nimport os\r\nimport codecs\r\n# goods = wn.synsets('beautiful')\r\n# beautifuls = wn.synsets('pretty')\r\n# bads = wn.synsets('standard')\r\n\r\n# print('good和bad的语义相似度为: ', max([0 if good.path_similarity(bad) == None else good.path_similarity(bad) for good in goods for bad in bads]))\r\ndef readOnePolicy(path2):\r\n ethic_set = wn.synsets('ethic')\r\n # print('ethic的同义词集为:', ethic_set)\r\n # print('ethic的各同义词集包含的单词有:', [ethic.lemma_names() for ethic in ethic_set])\r\n # print('ethic的各同义词集的具体定义是:',[dog.definition() for dog in ethic_set])\r\n # print('ethic的各同义词集的例子是:',[dog.examples() for dog in ethic_set])\r\n standard_set = wn.synsets('standard')\r\n privacy_set = wn.synsets('privacy')\r\n education_set = wn.synsets('education')\r\n investment_set = wn.synsets('investment')\r\n application_set = wn.synsets('application')\r\n content=''\r\n # with open(path2,'r',encoding='UTF-8') as f1:\r\n # with open(path2, 'r', encoding='UTF-8') as f1:\r\n with codecs.open(path2, 'r', encoding=u'utf-8', errors='ignore') as fr:###这里用codecs防止编码出错\r\n content=fr.read()\r\n content=content.split()\r\n # print(type(content))\r\n\r\n # content = wn.synsets('standard')\r\n\r\n # print('good和beautiful的语义相似度为: ', max([0 if one_ethic.path_similarity(one_word) == None else one_ethic.path_similarity(one_word) for one_ethic in ethic_set for one_word in content]))\r\n #\r\n # for ethic in ethic_set:\r\n # # print(type(ethic.lemma_names()))##list\r\n # for one_word in range(len(ethic.lemma_names())):\r\n # print(ethic.lemma_names()[one_word])\r\n # print('content和ethic的语义相似度为: ', max([0 if good.path_similarity(beautiful) == None else good.path_similarity(beautiful) for good in goods for beautiful in beautifuls]))\r\n stop_words=''\r\n with open('stopWords.txt','r') as f2:\r\n stop_words=f2.read()\r\n stop_words=stop_words.split()\r\n\r\n ethic_max_prob = 0\r\n standard_max_prob = 0\r\n privacy_max_prob = 0\r\n education_max_prob = 0\r\n investment_max_prob = 0\r\n application_max_prob = 0\r\n for i in range(len(content)):\r\n contentSyns=[]\r\n if content[i] not in stop_words:\r\n if not content[i].isnumeric():\r\n # print(content[i],' content[i]')\r\n contentSyns=wn.synsets(content[i])\r\n # print(contentSyns,' contentsyns')###contentSyns有些是空的[],下面max()会报错\r\n if len(contentSyns)>0:\r\n ethic_prob=max([0 if e.path_similarity(c) == None else e.path_similarity(c) for e in ethic_set for c in contentSyns])\r\n standard_prob = max([0 if s.path_similarity(c) == None else s.path_similarity(c) for s in standard_set for c in contentSyns])\r\n privacy_prob = max([0 if p.path_similarity(c) == None else p.path_similarity(c) for p in privacy_set for c in contentSyns])\r\n education_prob = max([0 if edu.path_similarity(c) == None else edu.path_similarity(c) for edu in education_set for c in contentSyns])\r\n investment_prob = max([0 if i.path_similarity(c) == None else i.path_similarity(c) for i in investment_set for c in contentSyns])\r\n application_prob = max([0 if a.path_similarity(c) == None else a.path_similarity(c) for a in application_set for c in contentSyns])\r\n\r\n if ethic_prob>ethic_max_prob:\r\n ethic_max_prob=ethic_prob\r\n if standard_prob>standard_max_prob:\r\n standard_max_prob=standard_prob\r\n if privacy_prob>privacy_max_prob:\r\n privacy_max_prob=privacy_prob\r\n if education_prob > 
education_max_prob:\r\n education_max_prob = education_prob\r\n if investment_prob > investment_max_prob:\r\n investment_max_prob = investment_prob\r\n if application_prob > application_max_prob:\r\n application_max_prob = application_prob\r\n\r\n\r\n # print(max_prob,' 概率')\r\n\r\n # print(ethic_max_prob,' ethic_max_prob')\r\n # print(standard_max_prob,' standard_max_prob')\r\n # print(privacy_max_prob,' privacy_max_prob')\r\n print(path2,' ',ethic_max_prob,' ',standard_max_prob,' ',privacy_max_prob,' ',education_max_prob,' ',investment_max_prob,' ',application_max_prob)\r\n\r\nfile_dir = r\"txt\"\r\nfor root, dirs, files in os.walk(file_dir):\r\n for f in range(len(files)):\r\n path1=os.path.join(file_dir,files[f])\r\n # print(path1,' doc_name')\r\n readOnePolicy(path1)\r\n # with open(path1, 'r') as f1:\r\n # content = f1.read()\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix
import numpy as np
from scipy.stats import rankdata
def iou_score(target, prediction):
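    # IoU = |target ∩ prediction| / |target ∪ prediction|; the small epsilon
    # below keeps the division defined when both masks are empty.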
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)
return iou_score
def dice_score(actual, predicted):
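    # Dice = 2·|A ∩ B| / (|A| + |B|); defined as 1 when both masks are empty.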
    actual = np.asarray(actual).astype(bool)  # np.bool was removed in NumPy 1.24; the builtin bool works everywhere
    predicted = np.asarray(predicted).astype(bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0: return 1
intersection = np.logical_and(actual, predicted)
return 2. * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
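    # fraction of elements that are True in both masks (joint positives over total size)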
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
num_els = actual.size
intersection = np.logical_and(actual, predicted)
return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
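    # AUC via the rank-sum (Mann-Whitney U) identity:
    # AUC = (sum of ranks of positives - n_pos*(n_pos+1)/2) / (n_pos * n_neg)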
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):
"""
    pretty print for confusion matrices
https://gist.github.com/zachguo/10296432
"""
if text_file is None: print("\n", end=" ")
else: print("\n", end=" ", file=open(text_file, "a"))
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * " " + "t/p" + (columnwidth - 3) // 2 * " "
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell
# Print header
if text_file is None: print(" " + fst_empty_cell, end=" ")
else: print(" " + fst_empty_cell, end=" ", file = open(text_file, "a"))
for label in labels:
if text_file is None: print("%{0}s".format(columnwidth) % label, end=" ")
else: print("%{0}s".format(columnwidth) % label, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
# Print rows
for i, label1 in enumerate(labels):
if text_file is None: print(" %{0}s".format(columnwidth) % label1, end=" ")
else: print(" %{0}s".format(columnwidth) % label1, end=" ", file = open(text_file, "a"))
for j in range(len(labels)):
cell = "%{}d".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None: print(cell, end=" ")
else: print(cell, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):
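    # returns (AUC, MCC, micro-F1); AUC is weighted one-vs-one for more than 2 classes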
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes)==2:
mean_auc = roc_auc_score(y_true, y_proba[:,1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')
# mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')
# ovo should be better, but average is not clear from docs
# mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')
if print_conf:
if text_file is not None:
print("\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}".format(100*mcc, 100*f1, 100*mean_auc), end=" ", file=open(text_file, "a"))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
|
normal
|
{
"blob_id": "c599a75788e3548c52ebb3b29e7a2398ff1b28a2",
"index": 1808,
"step-1": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\n<mask token>\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\n<mask token>\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-3": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-4": "from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix\nimport numpy as np\nfrom scipy.stats import rankdata\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, 
y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-5": "from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix\nimport numpy as np\nfrom scipy.stats import rankdata\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)\n return iou_score\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0: return 1\n intersection = np.logical_and(actual, predicted)\n return 2. * intersection.sum() / im_sum\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None: print(\"\\n\", end=\" \")\n else: print(\"\\n\", end=\" \", file=open(text_file, \"a\"))\n\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n\n fst_empty_cell = (columnwidth - 3) // 2 * \" \" + \"t/p\" + (columnwidth - 3) // 2 * \" \"\n\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = \" \" * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell\n # Print header\n if text_file is None: print(\" \" + fst_empty_cell, end=\" \")\n else: print(\" \" + fst_empty_cell, end=\" \", file = open(text_file, \"a\"))\n\n for label in labels:\n if text_file is None: print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n else: print(\"%{0}s\".format(columnwidth) % label, end=\" \", file = open(text_file, \"a\"))\n if text_file is None: print()\n else: print(' ', file = open(text_file, \"a\"))\n # Print rows\n for i, label1 in enumerate(labels):\n if text_file is None: print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n else: print(\" %{0}s\".format(columnwidth) % label1, end=\" \", file = open(text_file, \"a\"))\n for j in range(len(labels)):\n cell = \"%{}d\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None: print(cell, end=\" \")\n else: print(cell, end=\" \", file = open(text_file, \"a\"))\n if text_file is None: print()\n else: print(' ', file = open(text_file, \"a\"))\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes)==2:\n mean_auc = roc_auc_score(y_true, y_proba[:,1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')\n\n # mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')\n # ovo should be better, but average is not clear from docs\n # 
mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')\n\n if print_conf:\n if text_file is not None:\n print(\"\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}\".format(100*mcc, 100*f1, 100*mean_auc), end=\" \", file=open(text_file, \"a\"))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n\n return mean_auc, mcc, f1",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import os
CSRF_ENABLED = True
basedir = os.path.abspath(os.path.dirname(__file__))
# Heroku vs. Local Configs
if os.environ.get('HEROKU') is None:
# Database path
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
# CSRF Key
SECRET_KEY = os.urandom(24)
# Pocket API
CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'
# News API Credentials
TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'
NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'
else:
# Database path
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
# CSRF Key
SECRET_KEY = os.environ['CSRF_SECRET_KEY']
# Pocket API
CONSUMER_KEY = os.environ['POCKET_KEY']
# News API Credentials
TROVE_KEY = os.environ['TROVE_KEY']
NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']
# Path where we store the migration data files
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
|
normal
|
{
"blob_id": "0656aba517023c003e837d5ad04daeb364f7fda8",
"index": 4688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('HEROKU') is None:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n SECRET_KEY = os.urandom(24)\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n CONSUMER_KEY = os.environ['POCKET_KEY']\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\n<mask token>\n",
"step-3": "<mask token>\nCSRF_ENABLED = True\nbasedir = os.path.abspath(os.path.dirname(__file__))\nif os.environ.get('HEROKU') is None:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n SECRET_KEY = os.urandom(24)\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n CONSUMER_KEY = os.environ['POCKET_KEY']\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n",
"step-4": "import os\nCSRF_ENABLED = True\nbasedir = os.path.abspath(os.path.dirname(__file__))\nif os.environ.get('HEROKU') is None:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n SECRET_KEY = os.urandom(24)\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n CONSUMER_KEY = os.environ['POCKET_KEY']\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n",
"step-5": "import os\n\nCSRF_ENABLED = True\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Heroku vs. Local Configs\nif os.environ.get('HEROKU') is None:\n # Database path\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n # CSRF Key\n SECRET_KEY = os.urandom(24)\n # Pocket API\n CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'\n # News API Credentials\n TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'\n NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'\nelse:\n # Database path\n SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']\n # CSRF Key\n SECRET_KEY = os.environ['CSRF_SECRET_KEY']\n # Pocket API\n CONSUMER_KEY = os.environ['POCKET_KEY']\n # News API Credentials\n TROVE_KEY = os.environ['TROVE_KEY']\n NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']\n\n# Path where we store the migration data files\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ob import *
if __name__ == "__main__":
# Game starts
print('New game!')
# Deal
deck = Deck()
deck.shuffle()
players = deck.deal()
# Bid
auction = Auction(players)
auction.bid()
# Play
tricks = Tricks(auction)
tricks.play()
|
normal
|
{
"blob_id": "06161b1f45e435d0273dd193229ad2ecfd46c625",
"index": 9002,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n print('New game!')\n deck = Deck()\n deck.shuffle()\n players = deck.deal()\n auction = Auction(players)\n auction.bid()\n tricks = Tricks(auction)\n tricks.play()\n",
"step-3": "from ob import *\nif __name__ == '__main__':\n print('New game!')\n deck = Deck()\n deck.shuffle()\n players = deck.deal()\n auction = Auction(players)\n auction.bid()\n tricks = Tricks(auction)\n tricks.play()\n",
"step-4": "from ob import *\n\nif __name__ == \"__main__\":\n # Game starts\n print('New game!')\n\n # Deal\n deck = Deck()\n deck.shuffle()\n players = deck.deal()\n\n # Bid\n auction = Auction(players)\n auction.bid()\n\n # Play\n tricks = Tricks(auction)\n tricks.play()\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print(" whats your name boi ?")
name = input();
if name == "arrya":
print("u are a boi");
elif name == "jon":
print("basterd")
elif name == "ned":
print("you are dead man")
elif name == "rob":
print("the king in the north")
else:
print("carry on")
|
normal
|
{
"blob_id": "483a5e95a7bfca2cc6b1e7e81740620468fb5623",
"index": 9646,
"step-1": "<mask token>\n",
"step-2": "print(' whats your name boi ?')\n<mask token>\nif name == 'arrya':\n print('u are a boi')\nelif name == 'jon':\n print('basterd')\nelif name == 'ned':\n print('you are dead man')\nelif name == 'rob':\n print('the king in the north')\nelse:\n print('carry on')\n",
"step-3": "print(' whats your name boi ?')\nname = input()\nif name == 'arrya':\n print('u are a boi')\nelif name == 'jon':\n print('basterd')\nelif name == 'ned':\n print('you are dead man')\nelif name == 'rob':\n print('the king in the north')\nelse:\n print('carry on')\n",
"step-4": "print(\" whats your name boi ?\")\r\nname = input();\r\nif name == \"arrya\":\r\n print(\"u are a boi\");\r\nelif name == \"jon\":\r\n print(\"basterd\")\r\nelif name == \"ned\":\r\n print(\"you are dead man\")\r\nelif name == \"rob\":\r\n print(\"the king in the north\")\r\nelse:\r\n print(\"carry on\")\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestSolution(unittest.TestCase):
def test_findOriginalArray(self):
solution = Solution()
self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,
3, 4])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
class TestSolution(unittest.TestCase):
def test_findOriginalArray(self):
solution = Solution()
self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,
3, 4])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def findOriginalArray(self, changed):
"""
:type changed: List[int]
:rtype: List[int]
"""
n = len(changed)
if n % 2 != 0:
return []
freq = Counter(changed)
changed.sort()
ans = []
for num in changed:
if num in freq and freq[num] > 0:
freq[num] -= 1
double_num = 2 * num
if double_num in freq and freq[double_num] > 0:
ans.append(num)
freq[double_num] -= 1
else:
return []
return ans
class TestSolution(unittest.TestCase):
def test_findOriginalArray(self):
solution = Solution()
self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,
3, 4])
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from collections import Counter
class Solution(object):
def findOriginalArray(self, changed):
"""
:type changed: List[int]
:rtype: List[int]
"""
n = len(changed)
if n % 2 != 0:
return []
freq = Counter(changed)
changed.sort()
ans = []
for num in changed:
if num in freq and freq[num] > 0:
freq[num] -= 1
double_num = 2 * num
if double_num in freq and freq[double_num] > 0:
ans.append(num)
freq[double_num] -= 1
else:
return []
return ans
class TestSolution(unittest.TestCase):
def test_findOriginalArray(self):
solution = Solution()
self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,
3, 4])
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "d5acda0d5d066d381a7f6310eb4fe6280d7e84de",
"index": 5309,
"step-1": "<mask token>\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def findOriginalArray(self, changed):\n \"\"\"\n :type changed: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(changed)\n if n % 2 != 0:\n return []\n freq = Counter(changed)\n changed.sort()\n ans = []\n for num in changed:\n if num in freq and freq[num] > 0:\n freq[num] -= 1\n double_num = 2 * num\n if double_num in freq and freq[double_num] > 0:\n ans.append(num)\n freq[double_num] -= 1\n else:\n return []\n return ans\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom collections import Counter\n\n\nclass Solution(object):\n\n def findOriginalArray(self, changed):\n \"\"\"\n :type changed: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(changed)\n if n % 2 != 0:\n return []\n freq = Counter(changed)\n changed.sort()\n ans = []\n for num in changed:\n if num in freq and freq[num] > 0:\n freq[num] -= 1\n double_num = 2 * num\n if double_num in freq and freq[double_num] > 0:\n ans.append(num)\n freq[double_num] -= 1\n else:\n return []\n return ans\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
5,
6
]
}
|
[
2,
3,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = 'localhost', 8886
sock.connect(server_address)
data = 'TCP'
length = len(data)
ret = bytearray([])
for byte in data.encode('utf-8'):
ret.append(byte)
sock.sendall(ret)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = 'localhost', 8886
sock.connect(server_address)
data = 'TCP'
length = len(data)
ret = bytearray([])
for byte in data.encode('utf-8'):
ret.append(byte)
sock.sendall(ret)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import socket
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = 'localhost', 8886
sock.connect(server_address)
data = 'TCP'
length = len(data)
ret = bytearray([])
for byte in data.encode('utf-8'):
ret.append(byte)
sock.sendall(ret)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import socket
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 8886)
sock.connect(server_address)
data = "TCP"
length = len(data)
ret = bytearray([])
for byte in data.encode("utf-8"):
ret.append(byte)
sock.sendall(ret)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "c6fd848bb3d845a50b928c18a51f296a500e7746",
"index": 2922,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\n\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = 'localhost', 8886\n sock.connect(server_address)\n data = 'TCP'\n length = len(data)\n ret = bytearray([])\n for byte in data.encode('utf-8'):\n ret.append(byte)\n sock.sendall(ret)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\n\ndef main():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('localhost', 8886)\n sock.connect(server_address)\n\n data = \"TCP\"\n length = len(data)\n ret = bytearray([])\n for byte in data.encode(\"utf-8\"):\n ret.append(byte)\n sock.sendall(ret)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import List
import pytest
from raiden import waiting
from raiden.api.python import RaidenAPI
from raiden.raiden_service import RaidenService
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.transfer import block_offset_timeout
from raiden.transfer import views
from raiden.utils.typing import BlockTimeout
@raise_on_failure
@pytest.mark.parametrize("channels_per_node", [CHAIN])
@pytest.mark.parametrize("number_of_nodes", [3])
def test_leave_token_network(raiden_network: List[RaidenService], token_addresses):
registry_address = raiden_network[0].default_registry.address
token_address = token_addresses[0]
_, app1, _ = raiden_network
channels = views.list_channelstate_for_tokennetwork(
chain_state=views.state_from_raiden(app1),
token_network_registry_address=registry_address,
token_address=token_address,
)
timeout = block_offset_timeout(
app1, "Channels not settled in time", BlockTimeout(channels[0].settle_timeout * 10)
)
with timeout:
RaidenAPI(app1).token_network_leave(registry_address, token_address)
waiting.wait_for_settle(
raiden=app1,
token_network_registry_address=registry_address,
token_address=token_address,
channel_ids=[channel.identifier for channel in channels],
retry_timeout=0.1,
)
|
normal
|
{
"blob_id": "c4a13069b5add538589886b5e282d4fc9f2b72ad",
"index": 6807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@raise_on_failure\n@pytest.mark.parametrize('channels_per_node', [CHAIN])\n@pytest.mark.parametrize('number_of_nodes', [3])\ndef test_leave_token_network(raiden_network: List[RaidenService],\n token_addresses):\n registry_address = raiden_network[0].default_registry.address\n token_address = token_addresses[0]\n _, app1, _ = raiden_network\n channels = views.list_channelstate_for_tokennetwork(chain_state=views.\n state_from_raiden(app1), token_network_registry_address=\n registry_address, token_address=token_address)\n timeout = block_offset_timeout(app1, 'Channels not settled in time',\n BlockTimeout(channels[0].settle_timeout * 10))\n with timeout:\n RaidenAPI(app1).token_network_leave(registry_address, token_address)\n waiting.wait_for_settle(raiden=app1, token_network_registry_address\n =registry_address, token_address=token_address, channel_ids=[\n channel.identifier for channel in channels], retry_timeout=0.1)\n",
"step-3": "from typing import List\nimport pytest\nfrom raiden import waiting\nfrom raiden.api.python import RaidenAPI\nfrom raiden.raiden_service import RaidenService\nfrom raiden.tests.utils.detect_failure import raise_on_failure\nfrom raiden.tests.utils.network import CHAIN\nfrom raiden.tests.utils.transfer import block_offset_timeout\nfrom raiden.transfer import views\nfrom raiden.utils.typing import BlockTimeout\n\n\n@raise_on_failure\n@pytest.mark.parametrize('channels_per_node', [CHAIN])\n@pytest.mark.parametrize('number_of_nodes', [3])\ndef test_leave_token_network(raiden_network: List[RaidenService],\n token_addresses):\n registry_address = raiden_network[0].default_registry.address\n token_address = token_addresses[0]\n _, app1, _ = raiden_network\n channels = views.list_channelstate_for_tokennetwork(chain_state=views.\n state_from_raiden(app1), token_network_registry_address=\n registry_address, token_address=token_address)\n timeout = block_offset_timeout(app1, 'Channels not settled in time',\n BlockTimeout(channels[0].settle_timeout * 10))\n with timeout:\n RaidenAPI(app1).token_network_leave(registry_address, token_address)\n waiting.wait_for_settle(raiden=app1, token_network_registry_address\n =registry_address, token_address=token_address, channel_ids=[\n channel.identifier for channel in channels], retry_timeout=0.1)\n",
"step-4": "from typing import List\n\nimport pytest\n\nfrom raiden import waiting\nfrom raiden.api.python import RaidenAPI\nfrom raiden.raiden_service import RaidenService\nfrom raiden.tests.utils.detect_failure import raise_on_failure\nfrom raiden.tests.utils.network import CHAIN\nfrom raiden.tests.utils.transfer import block_offset_timeout\nfrom raiden.transfer import views\nfrom raiden.utils.typing import BlockTimeout\n\n\n@raise_on_failure\n@pytest.mark.parametrize(\"channels_per_node\", [CHAIN])\n@pytest.mark.parametrize(\"number_of_nodes\", [3])\ndef test_leave_token_network(raiden_network: List[RaidenService], token_addresses):\n registry_address = raiden_network[0].default_registry.address\n token_address = token_addresses[0]\n _, app1, _ = raiden_network\n\n channels = views.list_channelstate_for_tokennetwork(\n chain_state=views.state_from_raiden(app1),\n token_network_registry_address=registry_address,\n token_address=token_address,\n )\n\n timeout = block_offset_timeout(\n app1, \"Channels not settled in time\", BlockTimeout(channels[0].settle_timeout * 10)\n )\n with timeout:\n RaidenAPI(app1).token_network_leave(registry_address, token_address)\n waiting.wait_for_settle(\n raiden=app1,\n token_network_registry_address=registry_address,\n token_address=token_address,\n channel_ids=[channel.identifier for channel in channels],\n retry_timeout=0.1,\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# TODO: Add correct copyright header
import io
from unittest.mock import mock_open, patch
from django.test import TestCase
from importer.models import *
from importer.tasks import *
from importer.tests import mock_data
class MockResponse:
"""
This class will be used by the mock to replace requests.get
"""
def __init__(self, json_data, status_code, content=None, reason=" some error"):
self.json_data = json_data
self.status_code = status_code
self.reason = reason
self.content = content
def json(self):
return self.json_data
def iter_content(self, chunk_size=None):
return io.BytesIO(self.content.encode())
class GetItemIdFromItemURLTest(TestCase):
def test_get_item_id_from_item_url_with_slash(self):
"""
Testing get item id from item url if ends with /
"""
# Arrange
url = "https://www.loc.gov/item/mss859430021/"
# Act
resp = get_item_id_from_item_url(url)
# Assert
self.assertEqual(resp, "mss859430021")
def test_get_item_id_from_item_url_without_slash(self):
"""
Testing get item id from item url if ends without /
"""
# Arrange
url = "https://www.loc.gov/item/mss859430021"
# Act
resp = get_item_id_from_item_url(url)
# Assert
self.assertEqual(resp, "mss859430021")
class GETRequestDataTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
:return:
"""
self.url = "https://www.loc.gov/item/mss859430021?fo=json"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_request_success_json_data(self, mock_get):
"""get data on success json data"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"msg": "success"}, 200)
mock_get.return_value = mock_resp_instance
# Act
response = get_request_data(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance.json())
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_request_not_success(self, mock_get):
"""get data on not success"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"msg": "bad request"}, 400)
mock_get.return_value = mock_resp_instance
# Act
response = get_request_data(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 400)
self.assertEqual(response, {})
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_request_normal_response(self, mock_get):
"""if json false return repose object with content"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"msg": "success"}, 200, content="abc")
mock_get.return_value = mock_resp_instance
# Act
response = get_request_data(self.url, json_resp=False)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance)
class GetCollectionPagesTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_pages(self, mock_get):
"""
get collection pages successfully with pages info
"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"pagination": {"total": 10}}, 200)
mock_get.return_value = mock_resp_instance
# Act
response = get_collection_pages(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 10)
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_sucess_no_pages(self, mock_get):
"""
get collection pages successfully with no pages info
"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({}, 200)
mock_get.return_value = mock_resp_instance
# Act
response = get_collection_pages(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 0)
class GetCollectionItemidsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_item_ids(self, mock_get):
"""
Testing no of collection item ids available in given collection url
"""
# Arrange
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
# Act
response = get_collection_item_ids(self.url, 2)
# Assert
self.assertListEqual(response, ["mss37820001"])
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_item_ids_no_ids(self, mock_get):
"""
Testing no of collection item ids not availabel collection url
"""
# Arrange
mock_page1_result = MockResponse({}, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
# Act
response = get_collection_item_ids(self.url, 2)
# Arrange
self.assertListEqual(response, [])
class GetCollectionItemAssetURLsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.item_id = "mss37820001"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_asset_urls(self, mock_get):
"""
Testing no of collection item asset urls available in given item id
"""
# Arrange
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
# Act
response = get_collection_item_asset_urls(self.item_id)
# Assert
self.assertListEqual(
response,
[
"http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg"
],
)
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_no_asset_urls(self, mock_get):
"""
Testing no of collection item asset urls not available in given item id
"""
# Arrange
mock_resp = MockResponse({}, 200)
mock_get.return_value = mock_resp
# Act
response = get_collection_item_asset_urls(self.item_id)
# Assert
self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_asset_item(self, mock_get):
"""
Testing download image and write into disk without error
"""
# Arrange
mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
mock_get.return_value = mock_resp
m = mock_open()
with patch("__main__.open", m, create=True):
# Act
abc = download_write_collection_item_asset("dumy/image/url", "foo")
# Assert
self.assertEquals(abc, True)
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_asset_item_error(self, mock_get):
"""
Testing download image with exception
"""
# Arrange
mock_resp = MockResponse({}, 200, content=Exception("boom"))
mock_get.return_value = mock_resp
m = mock_open()
with patch("__main__.open", m, create=True):
# Act
abc = download_write_collection_item_asset("dumy/image/url", "foo")
# Assert
self.assertEquals(abc, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = "branch-rickey-papers"
self.project = "test-project"
self.item_id = "mss37820001"
self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_collection_item_asstes(self, mock_get, mock_save):
"""
Testing no of collection item asset urls available in given collection url
"""
# Arrange
collection = {
"collection_name": self.name,
"collection_slug": slugify(self.name),
"collection_task_id": "123",
"subcollection_name": self.project,
"subcollection_slug": slugify(self.project),
}
CollectionTaskDetails.objects.create(**collection)
mock_resp_page = MockResponse({"pagination": {"total": 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [
mock_resp_page,
mock_page1_result,
mock_page2_result,
mock_resp_item_urls,
]
mock_save.return_value = None
# Act
download_write_collection_item_assets(self.name, self.project, self.url)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_collection_item_asstes_no_db_entry(
self, mock_get, mock_save
):
"""
Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails
"""
# Arrange
mock_resp_page = MockResponse({"pagination": {"total": 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [
mock_resp_page,
mock_page1_result,
mock_page2_result,
mock_resp_item_urls,
]
mock_save.return_value = None
# Act
download_write_collection_item_assets(self.name, self.project, self.url)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(
collection_task=ctd, collection_item_identifier=self.item_id
)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = "branch-rickey-papers"
self.project = "test-project"
self.item_id = "mss37820001"
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_item_asstes(self, mock_get, mock_save):
"""
Testing no of collection item asset urls available in given item id
"""
# Arrange
collection = {
"collection_name": self.name,
"collection_slug": slugify(self.name),
"collection_task_id": "123",
"subcollection_name": self.project,
"subcollection_slug": slugify(self.project),
}
CollectionTaskDetails.objects.create(**collection)
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
# Act
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(
collection_task=ctd, collection_item_identifier=self.item_id
)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
"""
Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails
"""
# Arrange
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
# Act
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(
collection_task=ctd, collection_item_identifier=self.item_id
)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
|
normal
|
{
"blob_id": "b131107d2161634e2c09e0b3ab80dd322d13fbc2",
"index": 2881,
"step-1": "<mask token>\n\n\nclass GetCollectionItemidsTest(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = 
CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n 
collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-2": "<mask token>\n\n\nclass GETRequestDataTest(TestCase):\n <mask token>\n <mask token>\n\n @patch('importer.tasks.requests.get')\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n <mask token>\n\n\nclass GetCollectionPagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids available in given collection url\n \"\"\"\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, ['mss37820001'])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n 
Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n 
mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-3": "<mask token>\n\n\nclass GetItemIdFromItemURLTest(TestCase):\n\n def test_get_item_id_from_item_url_with_slash(self):\n \"\"\"\n Testing get item id from item url if ends with /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021/'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n def test_get_item_id_from_item_url_without_slash(self):\n \"\"\"\n Testing get item id from item url if ends without /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n\nclass GETRequestDataTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n :return:\n \"\"\"\n self.url = 'https://www.loc.gov/item/mss859430021?fo=json'\n\n @patch('importer.tasks.requests.get')\n def test_get_request_success_json_data(self, mock_get):\n \"\"\"get data on success json data\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())\n\n @patch('importer.tasks.requests.get')\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n\n @patch('importer.tasks.requests.get')\n def test_get_request_normal_response(self, mock_get):\n \"\"\"if json false return repose object with content\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200, content=\n 'abc')\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url, json_resp=False)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)\n\n\nclass GetCollectionPagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item 
ids available in given collection url\n \"\"\"\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, ['mss37820001'])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 
'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def 
test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-4": "<mask token>\n\n\nclass MockResponse:\n <mask token>\n <mask token>\n <mask token>\n\n def iter_content(self, chunk_size=None):\n return io.BytesIO(self.content.encode())\n\n\nclass GetItemIdFromItemURLTest(TestCase):\n\n def test_get_item_id_from_item_url_with_slash(self):\n \"\"\"\n Testing get item id from item url if ends with /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021/'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n def test_get_item_id_from_item_url_without_slash(self):\n \"\"\"\n Testing get item id from item url if ends without /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n\nclass GETRequestDataTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n :return:\n \"\"\"\n self.url = 'https://www.loc.gov/item/mss859430021?fo=json'\n\n @patch('importer.tasks.requests.get')\n def test_get_request_success_json_data(self, mock_get):\n \"\"\"get data on success json data\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())\n\n @patch('importer.tasks.requests.get')\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n\n @patch('importer.tasks.requests.get')\n def test_get_request_normal_response(self, mock_get):\n \"\"\"if json false return repose object with content\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200, content=\n 'abc')\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url, json_resp=False)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)\n\n\nclass GetCollectionPagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids available in given collection url\n \"\"\"\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, ['mss37820001'])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n 
\"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n 
self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-5": "# TODO: Add correct copyright header\n\nimport io\nfrom unittest.mock import mock_open, patch\n\nfrom django.test import TestCase\n\nfrom importer.models import *\nfrom importer.tasks import *\nfrom importer.tests import mock_data\n\n\nclass MockResponse:\n \"\"\"\n This class will be used by the mock to replace requests.get\n \"\"\"\n\n def __init__(self, json_data, status_code, content=None, reason=\" some error\"):\n self.json_data = json_data\n self.status_code = status_code\n self.reason = reason\n self.content = content\n\n def json(self):\n return self.json_data\n\n def iter_content(self, chunk_size=None):\n return io.BytesIO(self.content.encode())\n\n\nclass GetItemIdFromItemURLTest(TestCase):\n def test_get_item_id_from_item_url_with_slash(self):\n \"\"\"\n Testing get item id from item url if ends with /\n \"\"\"\n # Arrange\n url = \"https://www.loc.gov/item/mss859430021/\"\n\n # Act\n resp = get_item_id_from_item_url(url)\n\n # Assert\n self.assertEqual(resp, \"mss859430021\")\n\n def test_get_item_id_from_item_url_without_slash(self):\n \"\"\"\n Testing get item id from item url if ends without /\n \"\"\"\n # Arrange\n url = \"https://www.loc.gov/item/mss859430021\"\n\n # Act\n resp = get_item_id_from_item_url(url)\n\n # Assert\n self.assertEqual(resp, \"mss859430021\")\n\n\nclass GETRequestDataTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n :return:\n \"\"\"\n self.url = \"https://www.loc.gov/item/mss859430021?fo=json\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_request_success_json_data(self, mock_get):\n \"\"\"get data on success json data\"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"bad request\"}, 400)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_request_normal_response(self, mock_get):\n \"\"\"if json false return repose object with content\"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)\n\n\nclass GetCollectionPagesTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n 
\"\"\"\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"pagination\": {\"total\": 10}}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_collection_pages(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_collection_pages(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids available in given collection url\n \"\"\"\n # Arrange\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n\n # Act\n response = get_collection_item_ids(self.url, 2)\n\n # Assert\n self.assertListEqual(response, [\"mss37820001\"])\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n # Arrange\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n\n # Act\n response = get_collection_item_ids(self.url, 2)\n\n # Arrange\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = \"mss37820001\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n # Arrange\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n\n # Act\n response = get_collection_item_asset_urls(self.item_id)\n\n # Assert\n self.assertListEqual(\n response,\n [\n \"http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg\"\n ],\n )\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n 
def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n # Arrange\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n\n # Act\n response = get_collection_item_asset_urls(self.item_id)\n\n # Assert\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n # Arrange\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n\n with patch(\"__main__.open\", m, create=True):\n\n # Act\n abc = download_write_collection_item_asset(\"dumy/image/url\", \"foo\")\n\n # Assert\n self.assertEquals(abc, True)\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n # Arrange\n mock_resp = MockResponse({}, 200, content=Exception(\"boom\"))\n mock_get.return_value = mock_resp\n m = mock_open()\n\n with patch(\"__main__.open\", m, create=True):\n\n # Act\n abc = download_write_collection_item_asset(\"dumy/image/url\", \"foo\")\n\n # Assert\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = \"branch-rickey-papers\"\n self.project = \"test-project\"\n self.item_id = \"mss37820001\"\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n # Arrange\n\n collection = {\n \"collection_name\": self.name,\n \"collection_slug\": slugify(self.name),\n \"collection_task_id\": \"123\",\n \"subcollection_name\": self.project,\n \"subcollection_slug\": slugify(self.project),\n }\n CollectionTaskDetails.objects.create(**collection)\n\n mock_resp_page = MockResponse({\"pagination\": {\"total\": 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [\n mock_resp_page,\n mock_page1_result,\n mock_page2_result,\n mock_resp_item_urls,\n ]\n mock_save.return_value = None\n\n # Act\n download_write_collection_item_assets(self.name, self.project, self.url)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_collection_item_asstes_no_db_entry(\n self, mock_get, mock_save\n ):\n \"\"\"\n Testing no of 
collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n # Arrange\n mock_resp_page = MockResponse({\"pagination\": {\"total\": 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [\n mock_resp_page,\n mock_page1_result,\n mock_page2_result,\n mock_resp_item_urls,\n ]\n mock_save.return_value = None\n\n # Act\n download_write_collection_item_assets(self.name, self.project, self.url)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = \"branch-rickey-papers\"\n self.project = \"test-project\"\n self.item_id = \"mss37820001\"\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n # Arrange\n\n collection = {\n \"collection_name\": self.name,\n \"collection_slug\": slugify(self.name),\n \"collection_task_id\": \"123\",\n \"subcollection_name\": self.project,\n \"subcollection_slug\": slugify(self.project),\n }\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n\n # Act\n download_write_item_assets(self.name, self.project, self.item_id)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n # Arrange\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n\n # Act\n download_write_item_assets(self.name, self.project, self.item_id)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-ids": [
16,
25,
31,
33,
38
]
}
|
[
16,
25,
31,
33,
38
] |
<|reserved_special_token_0|>
class terrainDeJeu:
def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):
self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0, 0]
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
<|reserved_special_token_0|>
def coupeSuivante(self, idCoupe):
return (idCoupe + 1) % (2 * self.nCoupes)
<|reserved_special_token_0|>
def joueurCoupe(self, idCoupe):
return 0 if idCoupe < self.nCoupes else 1
def coupePrenable(self, idCoupe):
return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3
def deplacer(self, joueur, idCoupe):
coupeInitiale = idCoupe
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while nGraines != 0:
idCoupe = self.coupeSuivante(idCoupe)
if idCoupe != coupeInitiale:
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if joueur != joueurCoupeFinale:
if self.nourrirAdversaire(joueur, coupeFinale):
while self.joueurCoupe(idCoupe
) == joueurCoupeFinale and self.coupePrenable(idCoupe):
self.scores[joueur] += self.plateau[idCoupe]
self.plateau[idCoupe] = 0
idCoupe = self.coupePrecedente(idCoupe)
self.tour = (self.tour + 1) % 2
def grainesRestantes(self):
return np.sum(self.plateau)
<|reserved_special_token_0|>
def nourrirAdversaire(self, joueur, coupeFinale):
adversaire = (joueur + 1) % 2
admissible = False
idCoupe = self.nCoupes * (adversaire + 1) - 1
while self.joueurCoupe(idCoupe) == adversaire:
if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:
admissible = True
elif not self.coupePrenable(idCoupe):
admissible = True
idCoupe = self.coupePrecedente(idCoupe)
return admissible
def coupesAdmissiblesNourrir(self, joueur):
coupesAdmissibles = []
idCoupe = self.nCoupes * (joueur + 1) - 1
distance = 1
while self.joueurCoupe(idCoupe) == joueur:
if self.plateau[idCoupe] >= distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance += 1
return coupesAdmissibles
def coupesAdmissibles(self, joueur):
adversaire = (joueur + 1) % 2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
if len(coupesAdmissibles) == 0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2 * self.nCoupes, dtype=int)
self.finie = True
else:
coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range
(self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]
return coupesAdmissibles
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def afficherScores(self):
print('score J1.........' + str(self.scores[0]))
print('score MinMax.....' + str(self.scores[1]))
<|reserved_special_token_0|>
def jouer(self):
if not self.partieFinie():
self.afficherPlateau()
self.afficherScores()
if self.tour == 0:
self.tourDuJoueur()
else:
self.tourOrdi()
print('\n')
else:
self.afficherPlateau()
self.afficherScores()
print('Partie Finie !')
def minimax(self, joueurMaximisant, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.minimax(joueurMaximisant)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
if self.tour == joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha, self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta, self.value)
return self.value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class terrainDeJeu:
def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):
self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0, 0]
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
def clone(self):
clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.
nGrainesParCoupelleInit)
clone.plateau = self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
def coupeSuivante(self, idCoupe):
return (idCoupe + 1) % (2 * self.nCoupes)
def coupePrecedente(self, idCoupe):
return (idCoupe - 1) % (2 * self.nCoupes)
def joueurCoupe(self, idCoupe):
return 0 if idCoupe < self.nCoupes else 1
def coupePrenable(self, idCoupe):
return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3
def deplacer(self, joueur, idCoupe):
coupeInitiale = idCoupe
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while nGraines != 0:
idCoupe = self.coupeSuivante(idCoupe)
if idCoupe != coupeInitiale:
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if joueur != joueurCoupeFinale:
if self.nourrirAdversaire(joueur, coupeFinale):
while self.joueurCoupe(idCoupe
) == joueurCoupeFinale and self.coupePrenable(idCoupe):
self.scores[joueur] += self.plateau[idCoupe]
self.plateau[idCoupe] = 0
idCoupe = self.coupePrecedente(idCoupe)
self.tour = (self.tour + 1) % 2
def grainesRestantes(self):
return np.sum(self.plateau)
def grainesRestantesJoueur(self, joueur):
if joueur == 0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
def nourrirAdversaire(self, joueur, coupeFinale):
adversaire = (joueur + 1) % 2
admissible = False
idCoupe = self.nCoupes * (adversaire + 1) - 1
while self.joueurCoupe(idCoupe) == adversaire:
if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:
admissible = True
elif not self.coupePrenable(idCoupe):
admissible = True
idCoupe = self.coupePrecedente(idCoupe)
return admissible
def coupesAdmissiblesNourrir(self, joueur):
coupesAdmissibles = []
idCoupe = self.nCoupes * (joueur + 1) - 1
distance = 1
while self.joueurCoupe(idCoupe) == joueur:
if self.plateau[idCoupe] >= distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance += 1
return coupesAdmissibles
def coupesAdmissibles(self, joueur):
adversaire = (joueur + 1) % 2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
if len(coupesAdmissibles) == 0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2 * self.nCoupes, dtype=int)
self.finie = True
else:
coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range
(self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]
return coupesAdmissibles
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def partieFinie(self):
limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit
self.finie = self.grainesRestantes() == 0 or self.scores[0
] > limiteGagne or self.scores[1] > limiteGagne
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],
self.plateau[0:self.nCoupes]]))
def afficherScores(self):
print('score J1.........' + str(self.scores[0]))
print('score MinMax.....' + str(self.scores[1]))
def evaluation(self, joueur):
adversaire = (joueur + 1) % 2
return self.scores[joueur] - self.scores[adversaire]
def jouer(self):
if not self.partieFinie():
self.afficherPlateau()
self.afficherScores()
if self.tour == 0:
self.tourDuJoueur()
else:
self.tourOrdi()
print('\n')
else:
self.afficherPlateau()
self.afficherScores()
print('Partie Finie !')
def minimax(self, joueurMaximisant, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.minimax(joueurMaximisant)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
if self.tour == joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha, self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta, self.value)
return self.value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class terrainDeJeu:
def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):
self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0, 0]
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
def clone(self):
clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.
nGrainesParCoupelleInit)
clone.plateau = self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
def coupeSuivante(self, idCoupe):
return (idCoupe + 1) % (2 * self.nCoupes)
def coupePrecedente(self, idCoupe):
return (idCoupe - 1) % (2 * self.nCoupes)
def joueurCoupe(self, idCoupe):
return 0 if idCoupe < self.nCoupes else 1
def coupePrenable(self, idCoupe):
return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3
def deplacer(self, joueur, idCoupe):
coupeInitiale = idCoupe
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while nGraines != 0:
idCoupe = self.coupeSuivante(idCoupe)
if idCoupe != coupeInitiale:
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if joueur != joueurCoupeFinale:
if self.nourrirAdversaire(joueur, coupeFinale):
while self.joueurCoupe(idCoupe
) == joueurCoupeFinale and self.coupePrenable(idCoupe):
self.scores[joueur] += self.plateau[idCoupe]
self.plateau[idCoupe] = 0
idCoupe = self.coupePrecedente(idCoupe)
self.tour = (self.tour + 1) % 2
def grainesRestantes(self):
return np.sum(self.plateau)
def grainesRestantesJoueur(self, joueur):
if joueur == 0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
def nourrirAdversaire(self, joueur, coupeFinale):
adversaire = (joueur + 1) % 2
admissible = False
idCoupe = self.nCoupes * (adversaire + 1) - 1
while self.joueurCoupe(idCoupe) == adversaire:
if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:
admissible = True
elif not self.coupePrenable(idCoupe):
admissible = True
idCoupe = self.coupePrecedente(idCoupe)
return admissible
def coupesAdmissiblesNourrir(self, joueur):
coupesAdmissibles = []
idCoupe = self.nCoupes * (joueur + 1) - 1
distance = 1
while self.joueurCoupe(idCoupe) == joueur:
if self.plateau[idCoupe] >= distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance += 1
return coupesAdmissibles
def coupesAdmissibles(self, joueur):
adversaire = (joueur + 1) % 2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
if len(coupesAdmissibles) == 0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2 * self.nCoupes, dtype=int)
self.finie = True
else:
coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range
(self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]
return coupesAdmissibles
<|reserved_special_token_0|>
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur, -np.inf, np.inf)
for idCoupe in self.arbreFils.keys():
print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[
idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value == self.arbreFils[idCoupe].value:
self.deplacer(joueur, idCoupe)
break
self.jouer()
def partieFinie(self):
limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit
self.finie = self.grainesRestantes() == 0 or self.scores[0
] > limiteGagne or self.scores[1] > limiteGagne
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],
self.plateau[0:self.nCoupes]]))
def afficherScores(self):
print('score J1.........' + str(self.scores[0]))
print('score MinMax.....' + str(self.scores[1]))
def evaluation(self, joueur):
adversaire = (joueur + 1) % 2
return self.scores[joueur] - self.scores[adversaire]
def jouer(self):
if not self.partieFinie():
self.afficherPlateau()
self.afficherScores()
if self.tour == 0:
self.tourDuJoueur()
else:
self.tourOrdi()
print('\n')
else:
self.afficherPlateau()
self.afficherScores()
print('Partie Finie !')
def minimax(self, joueurMaximisant, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.minimax(joueurMaximisant)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
if self.tour == joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha, self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta, self.value)
return self.value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pygame.init()
class terrainDeJeu:
def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):
self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0, 0]
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
def clone(self):
clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.
nGrainesParCoupelleInit)
clone.plateau = self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
def coupeSuivante(self, idCoupe):
return (idCoupe + 1) % (2 * self.nCoupes)
def coupePrecedente(self, idCoupe):
return (idCoupe - 1) % (2 * self.nCoupes)
def joueurCoupe(self, idCoupe):
return 0 if idCoupe < self.nCoupes else 1
def coupePrenable(self, idCoupe):
return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3
def deplacer(self, joueur, idCoupe):
coupeInitiale = idCoupe
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while nGraines != 0:
idCoupe = self.coupeSuivante(idCoupe)
if idCoupe != coupeInitiale:
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if joueur != joueurCoupeFinale:
if self.nourrirAdversaire(joueur, coupeFinale):
while self.joueurCoupe(idCoupe
) == joueurCoupeFinale and self.coupePrenable(idCoupe):
self.scores[joueur] += self.plateau[idCoupe]
self.plateau[idCoupe] = 0
idCoupe = self.coupePrecedente(idCoupe)
self.tour = (self.tour + 1) % 2
def grainesRestantes(self):
return np.sum(self.plateau)
def grainesRestantesJoueur(self, joueur):
if joueur == 0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
def nourrirAdversaire(self, joueur, coupeFinale):
adversaire = (joueur + 1) % 2
admissible = False
idCoupe = self.nCoupes * (adversaire + 1) - 1
while self.joueurCoupe(idCoupe) == adversaire:
if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:
admissible = True
elif not self.coupePrenable(idCoupe):
admissible = True
idCoupe = self.coupePrecedente(idCoupe)
return admissible
def coupesAdmissiblesNourrir(self, joueur):
coupesAdmissibles = []
idCoupe = self.nCoupes * (joueur + 1) - 1
distance = 1
while self.joueurCoupe(idCoupe) == joueur:
if self.plateau[idCoupe] >= distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance += 1
return coupesAdmissibles
def coupesAdmissibles(self, joueur):
adversaire = (joueur + 1) % 2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
if len(coupesAdmissibles) == 0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2 * self.nCoupes, dtype=int)
self.finie = True
else:
coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range
(self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]
return coupesAdmissibles
def tourDuJoueur(self):
joueur = 0
coupesAdmissibles = self.coupesAdmissibles(joueur)
print(
"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:"
)
nCoupe = int(input())
while (nCoupe < 0 or nCoupe > self.nCoupes - 1 or not nCoupe in
coupesAdmissibles):
print(
'Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.'
)
nCoupe = int(input())
self.deplacer(joueur, nCoupe)
self.jouer()
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur, -np.inf, np.inf)
for idCoupe in self.arbreFils.keys():
print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[
idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value == self.arbreFils[idCoupe].value:
self.deplacer(joueur, idCoupe)
break
self.jouer()
def partieFinie(self):
limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit
self.finie = self.grainesRestantes() == 0 or self.scores[0
] > limiteGagne or self.scores[1] > limiteGagne
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],
self.plateau[0:self.nCoupes]]))
def afficherScores(self):
print('score J1.........' + str(self.scores[0]))
print('score MinMax.....' + str(self.scores[1]))
def evaluation(self, joueur):
adversaire = (joueur + 1) % 2
return self.scores[joueur] - self.scores[adversaire]
def jouer(self):
if not self.partieFinie():
self.afficherPlateau()
self.afficherScores()
if self.tour == 0:
self.tourDuJoueur()
else:
self.tourOrdi()
print('\n')
else:
self.afficherPlateau()
self.afficherScores()
print('Partie Finie !')
def minimax(self, joueurMaximisant, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.minimax(joueurMaximisant)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):
self.arbreFils = {}
coupesPossibles = self.coupesAdmissibles(self.tour)
if self.profondeur == self.profondeurMinimax or self.finie:
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour == joueurMaximisant:
fctComparaison = max
self.value = -np.inf
else:
fctComparaison = min
self.value = np.inf
for idCoupe in coupesPossibles:
fils = self.clone()
fils.profondeur = self.profondeur + 1
fils.deplacer(fils.tour, idCoupe)
fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)
if self.profondeur < profondeurArbre:
self.arbreFils[idCoupe] = fils
self.value = fctComparaison(self.value, fils.value)
if self.tour == joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha, self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta, self.value)
return self.value
t = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=8)
t.jouer()
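
# --- Added sketch (not part of the original record, definitions only): a rough
# benchmark pitting the alphabeta player against a uniformly random opponent,
# one way to measure the AI's strength over many games. It only uses the
# terrainDeJeu API defined above (coupesAdmissibles, deplacer, partieFinie,
# alphabeta, arbreFils) and assumes numpy is already imported as np in this
# module; the helper names partieAleatoire and tauxDeVictoire are hypothetical,
# and maxCoups guards against end-game positions that can cycle forever.
import random

def partieAleatoire(nCoupes=6, profondeur=4, maxCoups=200):
    # One full game: player 0 plays uniformly at random, player 1 uses alphabeta.
    t = terrainDeJeu(nCoupes, profondeur)
    for _ in range(maxCoups):
        if t.partieFinie():
            break
        coupes = t.coupesAdmissibles(t.tour)
        if t.finie or not coupes:  # coupesAdmissibles can itself end the game
            break
        if t.tour == 0:
            t.deplacer(0, random.choice(coupes))
        else:
            t.profondeur = 0
            valeur = t.alphabeta(1, -np.inf, np.inf)
            if t.finie or not t.arbreFils:
                break
            # mirror tourOrdi: play the first root child whose value matches
            coup = next(c for c, f in t.arbreFils.items() if f.value == valeur)
            t.deplacer(1, coup)
    return t.scores

def tauxDeVictoire(nParties=100):
    # Fraction of games won by the alphabeta player (player 1).
    gagnees = 0
    for _ in range(nParties):
        scores = partieAleatoire()
        if scores[1] > scores[0]:
            gagnees += 1
    return gagnees / nParties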
<|reserved_special_token_1|>
import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Changes (Matthieu, 15/04):
# Changed the representation of the playing field: it is now a single list.
# A single identifier per cup seems simpler to handle than a (player, number) pair.
# The list indices corresponding to each cup are, for example:
# [11] [10] [9] [8] [7] [6] computer's row (player 1)
# [0] [1] [2] [3] [4] [5] human player's row (player 0)
# Reworked some of the rule-checking functions to avoid deepcopy calls.
# Simplified the tree structure (a dictionary holding the children of each node).
# It is only built down to a given depth profondeurArbre (1 by default), or not at all.
# Alpha-beta algorithm.
# Open problems:
# A function that could detect situations where the game can loop forever
# (a repetition-detection sketch is added right after this comment block).
# Measuring the AI's performance, e.g. over a few hundred games, what percentage
# is won by the AI against an algorithm that plays randomly.
# Improving the evaluation function, which is very basic for now (a richer
# variant is sketched right after evaluation() below).
##-------------
# The playing field is an array of two rows (the two camps) and nCoupes columns (the cups),
# each initially holding n seeds. The first row is the human player's camp, the second the computer's.
# In each camp, the cups are numbered from 1 to nCoupes.
# On each turn, the player must choose a cup number.
# That cup's seeds are then redistributed into the following cups, and so on.
#
# Changes of 17.03 by Léo:
# -removed scoreGagnant, which does not appear in the basic rules of Awalé
# -to make the code easier to handle and understand, we now speak of
#  player 0 and player 1 (instead of 1 and 2) and the cups are numbered from 0 to nCoupes-1.
#Class basics:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explanation of the general minimax algorithm (page 52):
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code by Léo and Paul
#Problem: the game can loop forever at the end of a match (often when 2 seeds remain in symmetric positions)
# -> look into the "partieFinie" function and maybe try to fold that check into the recursive minimax algorithm..
#Problem: the old tree structure was too complicated: (*)
#the tree was built from a list according to the following principle:
#the nCoupes children of the element at index k are at indices k*nCoupes + l, with l ranging from 1 to nCoupes
#one can then check (with a drawing, for example) that there is a natural bijection between the tree structure and a list (or array) of the required size
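
# --- Added sketch (not in the original code): one way to address the endless
# end-game loops listed above is to remember each (board, player-to-move) state
# and flag the first repetition. The helper name estPositionRepetee is
# hypothetical; it only assumes the plateau array and tour attribute used below.
def estPositionRepetee(historique, plateau, tour):
    #historique: set of previously seen (board, player-to-move) states
    cle = (tuple(plateau), tour)
    if cle in historique:
        return True
    historique.add(cle)
    return False
# A game loop could then stop when True is returned and, following one common
# Awalé convention for blocked endings, let each player keep the seeds on
# their own side.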
class terrainDeJeu:
    # [11] [10] [9] [8] [7] [6]// computer's row (player 1)
    # [0] [1] [2] [3] [4] [5]// human player's row (player 0)
    def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructor
self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
        self.scores = [0,0] # scores[0] = player 0's score...
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
    #clones the playing field so that a move can be simulated on the copy
def clone(self):
clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
clone.plateau= self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
    #returns the id of the cup after idCoupe on the board (after = counter-clockwise)
def coupeSuivante(self,idCoupe):
return (idCoupe + 1)%(2*self.nCoupes)
    #returns the id of the cup before idCoupe on the board (before = clockwise)
def coupePrecedente(self,idCoupe):
return (idCoupe - 1)%(2*self.nCoupes)
    #returns the player (0 or 1) who owns cup idCoupe
def joueurCoupe(self,idCoupe):
return 0 if idCoupe < self.nCoupes else 1
    #returns whether cup idCoupe can be captured (holds 2 or 3 seeds)
def coupePrenable(self,idCoupe):
return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
def deplacer(self,joueur,idCoupe):
        coupeInitiale = idCoupe #id of the chosen cup
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
        while (nGraines != 0): #redistribute the seeds of the starting cup
idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale): #never sow back into the starting cup
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if (joueur != joueurCoupeFinale):
            #check whether the capture would starve the opponent
            #if not, the seeds are captured as usual
if (self.nourrirAdversaire(joueur,coupeFinale)):
while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
self.scores[joueur]+=self.plateau[idCoupe]
self.plateau[idCoupe]=0
idCoupe = self.coupePrecedente(idCoupe)
            #if the capture would starve the opponent:
            # no seed is taken, so nothing is done
self.tour=(self.tour+1)%2
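    # Worked example (added comment): with 6 cups of 4 seeds, if player 0 plays
    # cup 2, its 4 seeds land in cups 3,4,5,6; the last seed falls in cup 6,
    # which belongs to player 1 and now holds 5 seeds, so nothing is captured.
    # Had the last seed landed in an opponent cup holding 2 or 3 seeds, that cup
    # and the contiguous capturable cups behind it would have been collected.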
    #counts the number of seeds left on the board
def grainesRestantes(self):
return np.sum(self.plateau)
    #counts the number of seeds left on the board in the cups of joueur
def grainesRestantesJoueur(self,joueur):
if joueur==0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
    #determines whether, if joueur ends their move on cup coupeFinale,
    #their opponent will be starved or not
    #i.e. we check whether at least one seed will remain on the opponent's side
def nourrirAdversaire(self,joueur,coupeFinale):
adversaire = (joueur+1)%2
        #start the check at the adversary's farthest cup (walking clockwise)
admissible = False
idCoupe = (self.nCoupes*(adversaire+1))-1
while (self.joueurCoupe(idCoupe)==adversaire):
            #if idCoupe is past coupeFinale and still holds seeds, the move is admissible
if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
admissible=True
            #if joueur cannot capture cup idCoupe, the move is admissible
elif (not self.coupePrenable(idCoupe)):
admissible=True
idCoupe=self.coupePrecedente(idCoupe)
        #True if the move is admissible under the "feeding" rule
return admissible
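    # Worked example (added comment): suppose player 0's move ends on cup 11 while
    # every opponent cup 6..11 holds 2 or 3 seeds. The backward capture would then
    # sweep the whole opponent row and leave it empty, so no cup satisfies either
    # condition above and the function returns False: deplacer skips the capture,
    # as the feeding rule requires.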
    #admissible cups that joueur can play in order to feed their opponent
def coupesAdmissiblesNourrir(self,joueur):
coupesAdmissibles = []
        #start with the cup closest to the opponent (counter-clockwise)
idCoupe = (self.nCoupes*(joueur+1))-1
distance = 1
while (self.joueurCoupe(idCoupe)==joueur):
            #if idCoupe holds at least as many seeds as its distance to the opponent's cups
            #the move is admissible: at least one seed will feed the opponent
if self.plateau[idCoupe]>=distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance +=1
return coupesAdmissibles
def coupesAdmissibles(self,joueur):
adversaire = (joueur+1)%2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            #if no move can be played to feed the opponent
if len(coupesAdmissibles)==0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2*self.nCoupes,dtype=int)
self.finie = True
                #game over
        #otherwise all non-empty cups are admissible
else :
coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
return coupesAdmissibles
def tourDuJoueur(self):
joueur = 0
        #if the opponent has no seeds left, feeding them is mandatory
coupesAdmissibles = self.coupesAdmissibles(joueur)
print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
nCoupe = int(input())
#print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
            #case where the cup does not exist or the move is not admissible
print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
nCoupe = int(input())
self.deplacer(joueur,nCoupe)
self.jouer()
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur,-np.inf,np.inf)
for idCoupe in self.arbreFils.keys():
print("coupe = ",idCoupe," : valeur = ",self.arbreFils[idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value==self.arbreFils[idCoupe].value:
self.deplacer(joueur,idCoupe)
break
self.jouer()
def partieFinie(self):
        #True if no seeds remain on the board, or if a player's score exceeds half the total seeds (limiteGagne)
limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
return self.finie
def afficherPlateau(self):
        print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] reverses the list
def afficherScores(self):
print("score J1........."+str(self.scores[0]))
print("score MinMax....."+str(self.scores[1]))
def evaluation(self,joueur):
adversaire = (joueur+1)%2
return self.scores[joueur]-self.scores[adversaire]
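    #illustrative note (not original code): with scores [10, 4],
    #evaluation(1) = 4 - 10 = -6, so a negative value means joueur is behind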
    #Main game function
def jouer(self):
if (not self.partieFinie()) :
self.afficherPlateau()
self.afficherScores()
if (self.tour==0):
self.tourDuJoueur()
else:
self.tourOrdi()
print("\n")
else:
self.afficherPlateau()
self.afficherScores()
print("Partie Finie !")
    #no longer really needed: the minimax code is subsumed by the alphabeta function
    def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = the player whose score we want to maximise (0 or 1)
        #Recursively simulates hypothetical game positions (the AI in effect reads the future n=profondeur turns ahead)
self.arbreFils = {}
        #determine the possible moves
        #if no move is possible, this call also ends the game
coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie): #base case
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
        #iterate over every possible move
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.minimax(joueurMaximisant)
            #the tree (attribute arbreFils) is only actually filled
            #for depths < profondeurArbre
            #it could be skipped entirely, but profondeurArbre = 1
            #lets us display the value associated with each move...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
return self.value
    def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = the player whose score we want to maximise (0 or 1)
        #Recursively simulates hypothetical game positions (the AI in effect reads the future n=profondeur turns ahead)
self.arbreFils = {}
        #determine the possible moves
        #if no move is possible, this call also ends the game
coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie): #base case
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
        #iterate over every possible move
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
            #the tree (attribute arbreFils) is only actually filled
            #for depths < profondeurArbre
            #it could be skipped entirely, but profondeurArbre = 1
            #lets us display the value associated with each move...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
            #alpha and beta cutoffs once we are sure of having the best possible result
if self.tour==joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha,self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta,self.value)
return self.value
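
#A minimal sanity check (a sketch, not part of the game itself): minimax and
#alpha-beta must return the same root value, since the cutoffs only skip
#branches that provably cannot change the result. Assumes the class above and
#numpy imported as np at the top of this file; uncomment to try at small depth:
#
# a = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=4)
# a.profondeur = 0
# b = a.clone(); b.profondeur = 0
# assert a.minimax(1) == b.alphabeta(1, -np.inf, np.inf)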
t = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)
t.jouer()
|
flexible
|
{
"blob_id": "576d6bec4a91ba6f0597b76a5da5ad3ef6562b19",
"index": 9592,
"step-1": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n <mask token>\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n <mask token>\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n <mask token>\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' 
+ str(self.scores[1]))\n <mask token>\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n <mask token>\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def 
afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n\n def tourOrdi(self):\n joueur = 1\n self.profondeur = 0\n self.value = self.alphabeta(joueur, -np.inf, np.inf)\n for idCoupe in self.arbreFils.keys():\n print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[\n idCoupe].value)\n for idCoupe in self.arbreFils.keys():\n if self.value == self.arbreFils[idCoupe].value:\n self.deplacer(joueur, idCoupe)\n break\n self.jouer()\n\n def 
partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-4": "<mask token>\npygame.init()\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n\n def tourDuJoueur(self):\n joueur = 0\n coupesAdmissibles = self.coupesAdmissibles(joueur)\n print(\n \"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:\"\n )\n nCoupe = int(input())\n while (nCoupe < 0 or nCoupe > self.nCoupes - 1 or not nCoupe in\n coupesAdmissibles):\n print(\n 'Coupelle incorrecte. 
Entrez le numéro de la coupelle à jouer.'\n )\n nCoupe = int(input())\n self.deplacer(joueur, nCoupe)\n self.jouer()\n\n def tourOrdi(self):\n joueur = 1\n self.profondeur = 0\n self.value = self.alphabeta(joueur, -np.inf, np.inf)\n for idCoupe in self.arbreFils.keys():\n print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[\n idCoupe].value)\n for idCoupe in self.arbreFils.keys():\n if self.value == self.arbreFils[idCoupe].value:\n self.deplacer(joueur, idCoupe)\n break\n self.jouer()\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\nt = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=8)\nt.jouer()\n",
"step-5": "import numpy as np\r\n#!pip install pygame\r\nimport pygame\r\n#from copy import deepcopy\r\npygame.init()\r\n#-----------\r\n# Modifications (Matthieu, 15/04):\r\n# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.\r\n# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)\r\n# Les indices de la liste correspondant à chaque coupe sont par exemple :\r\n# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)\r\n# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)\r\n# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy\r\n# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)\r\n# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout\r\n# Algo alpha beta\r\n# Pbs : \r\n# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini\r\n# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de % \r\n# sont gagnées par l'ia contre un algo qui joue aléatoirement\r\n# Améliorer la fonction d'évaluation qui est pour l'instant très basique\r\n##-------------\r\n# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),\r\n# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.\r\n# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.\r\n# A chaque tour, le joueur doit choisir un numéro de coupelle.\r\n# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.\r\n#\r\n# modifs du 17.03 par Léo:\r\n# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé\r\n# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant\r\n# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.\r\n#Notions de classe:\r\n#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes\r\n#Explication de l'algorithme minimax général (page 52) :\r\n#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf\r\n#Code par Léo et Paul\r\n#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)\r\n# -> se pencher sur la fonction \"partieFinie\" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..\r\n#Pb: structure d'arbre trop compliquée: (*)\r\n#l'arbre est construit à partir d'une liste selon le principe suivant:\r\n#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes\r\n#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue\r\nclass terrainDeJeu:\r\n # [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)\r\n # [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)\r\n def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur\r\n self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)\r\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\r\n self.nCoupes = nCoupes\r\n self.scores = [0,0] # scores[0] = score du joueur 0...\r\n self.tour = 0\r\n self.finie = False\r\n self.profondeurMinimax = profondeur\r\n self.arbreFils = {}\r\n \r\n \r\n #clone le terrain de jeu pour pouvoir 
simuler un coup par la suite\r\n def clone(self):\r\n clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)\r\n clone.plateau= self.plateau.copy()\r\n clone.scores = self.scores.copy()\r\n clone.tour = self.tour\r\n clone.finie = self.finie\r\n return clone\r\n \r\n #retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)\r\n def coupeSuivante(self,idCoupe):\r\n return (idCoupe + 1)%(2*self.nCoupes)\r\n #retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)\r\n def coupePrecedente(self,idCoupe):\r\n return (idCoupe - 1)%(2*self.nCoupes)\r\n #retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe\r\n def joueurCoupe(self,idCoupe):\r\n return 0 if idCoupe < self.nCoupes else 1\r\n #retourne si idCoupe peut être prise (contient 2 ou 3 graines)\r\n def coupePrenable(self,idCoupe):\r\n return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)\r\n def deplacer(self,joueur,idCoupe):\r\n coupeInitiale = idCoupe #id de la coupelle choisie\r\n nGraines = self.plateau[idCoupe]\r\n self.plateau[idCoupe] = 0\r\n while (nGraines != 0): #On redistribue les graines de la coupelle initiale\r\n idCoupe = self.coupeSuivante(idCoupe)\r\n if (idCoupe != coupeInitiale): #On ne redistribue pas dans la coupelle initiale\r\n self.plateau[idCoupe] += 1\r\n nGraines -= 1\r\n coupeFinale = idCoupe\r\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\r\n if (joueur != joueurCoupeFinale): \r\n #on vérifie si on va affamer l'adversaire\r\n #si non, on prend les graines normalement\r\n if (self.nourrirAdversaire(joueur,coupeFinale)):\r\n while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):\r\n self.scores[joueur]+=self.plateau[idCoupe]\r\n self.plateau[idCoupe]=0\r\n idCoupe = self.coupePrecedente(idCoupe)\r\n #si on va affamer l'adversaire :\r\n # on ne prend aucune graine donc on ne fait rien\r\n self.tour=(self.tour+1)%2\r\n \r\n #On compte le nombre de graines restantes sur le plateau\r\n def grainesRestantes(self): \r\n return np.sum(self.plateau)\r\n #on compte le nombre de graines restantes sur le plateau pour les coupes de joueur\r\n def grainesRestantesJoueur(self,joueur):\r\n if joueur==0:\r\n return np.sum(self.plateau[0:self.nCoupes])\r\n else:\r\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\r\n #détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,\r\n #Yson adversaire sera affamé ou pas \r\n #on regarde donc si il restera au moins une graine sur le terrain de l'adversaire\r\n def nourrirAdversaire(self,joueur,coupeFinale): \r\n adversaire = (joueur+1)%2 \r\n #on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)\r\n admissible = False\r\n idCoupe = (self.nCoupes*(adversaire+1))-1\r\n while (self.joueurCoupe(idCoupe)==adversaire):\r\n #si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible\r\n if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):\r\n admissible=True\r\n #si joueur peut pas prendre la coupe idCoupe le coup est admissible\r\n elif (not self.coupePrenable(idCoupe)):\r\n admissible=True\r\n idCoupe=self.coupePrecedente(idCoupe)\r\n #True si le coup est admissible pour la règle \"nourrir\"\r\n return admissible \r\n #coupes admissibles que peut jouer joueur pour nourrir son adversaire\r\n def coupesAdmissiblesNourrir(self,joueur):\r\n coupesAdmissibles = []\r\n #on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)\r\n idCoupe = 
(self.nCoupes*(joueur+1))-1\r\n distance = 1\r\n while (self.joueurCoupe(idCoupe)==joueur):\r\n #s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire\r\n #le coup est admissible, au moins une graine nourrira l'adversaire\r\n if self.plateau[idCoupe]>=distance:\r\n coupesAdmissibles.append(idCoupe)\r\n idCoupe = self.coupePrecedente(idCoupe)\r\n distance +=1\r\n return coupesAdmissibles\r\n def coupesAdmissibles(self,joueur):\r\n adversaire = (joueur+1)%2\r\n if self.grainesRestantesJoueur(adversaire) == 0:\r\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\r\n #si aucun coup ne peut être joué pour nourrir l'adversaire\r\n if len(coupesAdmissibles)==0:\r\n self.scores[joueur] += self.grainesRestantes()\r\n self.plateau = np.zeros(2*self.nCoupes,dtype=int)\r\n self.finie = True\r\n #partie terminée\r\n \r\n #sinon toutes les coupes non vides sont admissibles\r\n else :\r\n coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]\r\n \r\n return coupesAdmissibles\r\n \r\n def tourDuJoueur(self):\r\n joueur = 0\r\n #si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir\r\n coupesAdmissibles = self.coupesAdmissibles(joueur)\r\n print(\"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:\")\r\n nCoupe = int(input())\r\n #print(\"coupesAdmissibles\",coupesAdmissibles)\r\n while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):\r\n #cas où la coupelle n'existe pas, ou correspond à un coup non admissible\r\n print(\"Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.\")\r\n nCoupe = int(input())\r\n self.deplacer(joueur,nCoupe)\r\n self.jouer()\r\n \r\n def tourOrdi(self):\r\n joueur = 1\r\n self.profondeur = 0\r\n self.value = self.alphabeta(joueur,-np.inf,np.inf)\r\n for idCoupe in self.arbreFils.keys():\r\n print(\"coupe = \",idCoupe,\" : valeur = \",self.arbreFils[idCoupe].value)\r\n for idCoupe in self.arbreFils.keys():\r\n if self.value==self.arbreFils[idCoupe].value:\r\n self.deplacer(joueur,idCoupe)\r\n break\r\n \r\n \r\n self.jouer()\r\n \r\n def partieFinie(self):\r\n #True si le plateau ne contient plus aucune graine\r\n limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit\r\n self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)\r\n return self.finie\r\n\r\n def afficherPlateau(self):\r\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste\r\n\r\n def afficherScores(self):\r\n print(\"score J1.........\"+str(self.scores[0]))\r\n print(\"score MinMax.....\"+str(self.scores[1]))\r\n\r\n def evaluation(self,joueur):\r\n adversaire = (joueur+1)%2\r\n return self.scores[joueur]-self.scores[adversaire]\r\n \r\n \r\n #Fonction principale\r\n def jouer(self):\r\n \r\n if (not self.partieFinie()) :\r\n self.afficherPlateau()\r\n self.afficherScores()\r\n if (self.tour==0):\r\n self.tourDuJoueur()\r\n else:\r\n self.tourOrdi()\r\n print(\"\\n\")\r\n else:\r\n self.afficherPlateau()\r\n self.afficherScores()\r\n print(\"Partie Finie !\")\r\n\r\n #plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta\r\n def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)\r\n #On simule ici des situations fictives de jeu de manière récursive (l'I.A. 
lit en quelque sorte l'avenir pour n=profondeur tours en avance)\r\n self.arbreFils = {}\r\n \r\n #on détermine les coups possibles\r\n #si aucun coup n'est possible cette fonction arrête aussi la partie\r\n coupesPossibles = self.coupesAdmissibles(self.tour) \r\n \r\n if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base\r\n self.value = self.evaluation(joueurMaximisant)\r\n return self.value\r\n \r\n if self.tour==joueurMaximisant:\r\n fctComparaison = max\r\n self.value = - np.inf\r\n else:\r\n fctComparaison = min\r\n self.value = np.inf\r\n \r\n #on parcourt tous les coups possibles\r\n for idCoupe in coupesPossibles:\r\n fils=self.clone()\r\n fils.profondeur=self.profondeur+1\r\n fils.deplacer(fils.tour,idCoupe)\r\n fils.value = fils.minimax(joueurMaximisant)\r\n \r\n #on ne remplit effectivement l'arbre (attribut arbreFils)\r\n #que pour une profondeur < à profondeurArbre\r\n #on pourrait même ne pas le remplir du tout mais profondeurArbre = 1\r\n #permet d'afficher les valeurs associées à chaque coup...\r\n if (self.profondeur < profondeurArbre):\r\n self.arbreFils[idCoupe]=fils\r\n self.value = fctComparaison(self.value, fils.value)\r\n \r\n return self.value\r\n \r\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)\r\n #On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)\r\n self.arbreFils = {}\r\n \r\n #on détermine les coups possibles\r\n #si aucun coup n'est possible cette fonction arrête aussi la partie\r\n coupesPossibles = self.coupesAdmissibles(self.tour) \r\n \r\n if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base\r\n self.value = self.evaluation(joueurMaximisant)\r\n return self.value\r\n \r\n if self.tour==joueurMaximisant:\r\n fctComparaison = max\r\n self.value = - np.inf\r\n else:\r\n fctComparaison = min\r\n self.value = np.inf\r\n \r\n #on parcourt tous les coups possibles\r\n for idCoupe in coupesPossibles:\r\n fils=self.clone()\r\n fils.profondeur=self.profondeur+1\r\n fils.deplacer(fils.tour,idCoupe)\r\n fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)\r\n \r\n #on ne remplit effectivement l'arbre (attribut arbreFils)\r\n #que pour une profondeur < à profondeurArbre\r\n #on pourrait même ne pas le remplir du tout mais profondeurArbre = 1\r\n #permet d'afficher les valeurs associées à chaque coup...\r\n if (self.profondeur < profondeurArbre):\r\n self.arbreFils[idCoupe]=fils\r\n \r\n self.value = fctComparaison(self.value, fils.value)\r\n \r\n #coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible\r\n if self.tour==joueurMaximisant:\r\n if self.value >= beta:\r\n return self.value\r\n alpha = fctComparaison(alpha,self.value)\r\n else:\r\n if alpha >= self.value:\r\n return self.value\r\n beta = fctComparaison(beta,self.value)\r\n \r\n return self.value\r\n \r\n \r\n\r\nt = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)\r\nt.jouer()",
"step-ids": [
14,
20,
21,
24,
26
]
}
|
[
14,
20,
21,
24,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i - 1][j - 1]
d = dim // 2
if i <= d:
if j <= d:
return z(i, j, d)
else:
j -= d
return t // 4 + z(i, j, d)
elif j <= d:
i -= d
return t // 2 + z(i, j, d)
else:
i -= d
j -= d
return 3 * t // 4 + z(i, j, d)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i - 1][j - 1]
d = dim // 2
if i <= d:
if j <= d:
return z(i, j, d)
else:
j -= d
return t // 4 + z(i, j, d)
elif j <= d:
i -= d
return t // 2 + z(i, j, d)
else:
i -= d
j -= d
return 3 * t // 4 + z(i, j, d)
<|reserved_special_token_0|>
print(z(i, j, dim))
<|reserved_special_token_1|>
t_dim_2 = [[1, 2], [3, 4]]
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i - 1][j - 1]
d = dim // 2
if i <= d:
if j <= d:
return z(i, j, d)
else:
j -= d
return t // 4 + z(i, j, d)
elif j <= d:
i -= d
return t // 2 + z(i, j, d)
else:
i -= d
j -= d
return 3 * t // 4 + z(i, j, d)
n = 2
i = 3
j = 3
dim = 2 ** n
print(z(i, j, dim))
<|reserved_special_token_1|>
t_dim_2 = [[1, 2], [3, 4]]
def z(i, j, dim):
t = dim ** 2
if dim == 2:
return t_dim_2[i-1][j-1]
d = dim//2
if i <= d: # I or II
if j <= d:
return z(i, j, d) #I
else:
j -= d
return t//4 + z(i, j, d) # II
else: # III or IV
if j <=d:
i -= d
return t//2 + z(i, j, d) # III
else:
i -= d
j -= d
return 3*t//4 + z(i, j, d) # IV
n = 2
i = 3
j = 3
dim = 2**n
print(z(i,j,dim))
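
# Quick check of the mapping (a hypothetical snippet, not in the original run):
# for n = 2 the function lays the 4x4 grid out in Z-order, so
#   for row in range(1, 5):
#       print([z(row, col, 4) for col in range(1, 5)])
# would print
#   [1, 2, 5, 6]
#   [3, 4, 7, 8]
#   [9, 10, 13, 14]
#   [11, 12, 15, 16]
# which is why z(3, 3, 4) above prints 13.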
|
flexible
|
{
"blob_id": "07ed8c12e8e5c568c897b6b632c48831267eba51",
"index": 1815,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i - 1][j - 1]\n d = dim // 2\n if i <= d:\n if j <= d:\n return z(i, j, d)\n else:\n j -= d\n return t // 4 + z(i, j, d)\n elif j <= d:\n i -= d\n return t // 2 + z(i, j, d)\n else:\n i -= d\n j -= d\n return 3 * t // 4 + z(i, j, d)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i - 1][j - 1]\n d = dim // 2\n if i <= d:\n if j <= d:\n return z(i, j, d)\n else:\n j -= d\n return t // 4 + z(i, j, d)\n elif j <= d:\n i -= d\n return t // 2 + z(i, j, d)\n else:\n i -= d\n j -= d\n return 3 * t // 4 + z(i, j, d)\n\n\n<mask token>\nprint(z(i, j, dim))\n",
"step-4": "t_dim_2 = [[1, 2], [3, 4]]\n\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i - 1][j - 1]\n d = dim // 2\n if i <= d:\n if j <= d:\n return z(i, j, d)\n else:\n j -= d\n return t // 4 + z(i, j, d)\n elif j <= d:\n i -= d\n return t // 2 + z(i, j, d)\n else:\n i -= d\n j -= d\n return 3 * t // 4 + z(i, j, d)\n\n\nn = 2\ni = 3\nj = 3\ndim = 2 ** n\nprint(z(i, j, dim))\n",
"step-5": "\nt_dim_2 = [[1, 2], [3, 4]]\n\ndef z(i, j, dim):\n t = dim ** 2\n if dim == 2:\n return t_dim_2[i-1][j-1]\n\n d = dim//2\n if i <= d: # I or II\n if j <= d:\n return z(i, j, d) #I\n else:\n j -= d\n return t//4 + z(i, j, d) # II\n else: # III or IV\n if j <=d:\n i -= d\n return t//2 + z(i, j, d) # III\n else:\n i -= d\n j -= d\n return 3*t//4 + z(i, j, d) # IV\nn = 2\ni = 3\nj = 3\ndim = 2**n\nprint(z(i,j,dim))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from text_to_word_cloud import *
from collections import Counter
from preprocess import *
if __name__ == '__main__':
data = load_data('train.json')
words = text_to_words(get_all_text(data), as_set=False)
cnt = Counter(words)
save_il_to_word_cloud_file("cloudofw.txt",cnt,len(words),call_R=True)
|
normal
|
{
"blob_id": "b3bba1119bfaf0c1e684e8835259ec6fa8c42cf7",
"index": 1838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n data = load_data('train.json')\n words = text_to_words(get_all_text(data), as_set=False)\n cnt = Counter(words)\n save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)\n",
"step-3": "from text_to_word_cloud import *\nfrom collections import Counter\nfrom preprocess import *\nif __name__ == '__main__':\n data = load_data('train.json')\n words = text_to_words(get_all_text(data), as_set=False)\n cnt = Counter(words)\n save_il_to_word_cloud_file('cloudofw.txt', cnt, len(words), call_R=True)\n",
"step-4": "from text_to_word_cloud import *\r\nfrom collections import Counter\r\nfrom preprocess import *\r\n\r\n\r\nif __name__ == '__main__':\r\n data = load_data('train.json')\r\n words = text_to_words(get_all_text(data), as_set=False)\r\n cnt = Counter(words)\r\n save_il_to_word_cloud_file(\"cloudofw.txt\",cnt,len(words),call_R=True)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import csv
import datetime
with open('/Users/wangshibao/SummerProjects/analytics-dashboard/myapp/CrimeHistory.csv', 'r', newline='') as f:
reader = csv.reader(f)
header = reader.next()
date_time = "20140501 00:00"
date_time = datetime.datetime.strptime(date_time, "%Y%m%d %H:%M")
    print(date_time)
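    # strptime parses the string into a datetime: "20140501 00:00" with format
    # "%Y%m%d %H:%M" gives datetime.datetime(2014, 5, 1, 0, 0), and
    # date_time.strftime("%Y%m%d %H:%M") round-trips it back to the string.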
|
normal
|
{
"blob_id": "cfb49d78dc14e6f4b6d2357d292fd6275edec711",
"index": 6844,
"step-1": "import csv\nimport datetime\nwith open('/Users/wangshibao/SummerProjects/analytics-dashboard/myapp/CrimeHistory.csv','rU') as f:\n reader = csv.reader(f)\n header = reader.next()\n date_time = \"20140501 00:00\"\n date_time = datetime.datetime.strptime(date_time, \"%Y%m%d %H:%M\")\n print date_time\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#-*- coding:utf-8 -*-
'''
'''
from flask import Flask, jsonify
app = Flask(__name__)
app.debug = True
from datetime import timedelta
from flask import make_response, request, current_app, render_template
from functools import update_wrapper
import json
from subprocess import *
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
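
# Usage note (illustrative): stacking @app.route(...) over @crossdomain(origin='*')
# below wraps each view so its responses carry the Access-Control-* headers set
# above, and CORS preflight OPTIONS requests are answered automatically.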
def getBzResult(search_str):
ans_list = get_search_res("bugzilla", "text", search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
#raise Exception('xyz')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res("ikb", "kb", search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":\".*%s.*\"}}}' %(query)
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' %(index, doc_type)
child = Popen(["curl", es_url, "-d", str(search_dsl).lower().encode('string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
#sorted to get the latest item
#newlist = list(reversed(sorted(ans_list, key=lambda k: k['id'])))
return ans_list
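
# Example of the DSL this builds (illustrative): for query "PSOD", after
# lower() the string handed to curl is
#   {"query":{"regexp":{"text":".*psod.*"}}}
# i.e. a case-normalised regexp match on the "text" field; it is then passed
# through the py2 'string-escape' codec before being sent to Elasticsearch.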
@app.route("/regexSearch")
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
#raise Exception('xyz')
res['res'] = 'success'
res['data'] = render_template('search_result.html', results = results)
return render_template('search_result.html', results = results)
@app.route("/DefaultError")
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5555)
|
normal
|
{
"blob_id": "70c78021a2544ea372545b037ed55298c26391d1",
"index": 1182,
"step-1": "<mask token>\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'\n ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n",
"step-4": "<mask token>\napp = Flask(__name__)\napp.debug = True\n<mask token>\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'\n ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n",
"step-5": "#-*- coding:utf-8 -*-\n'''\n'''\nfrom flask import Flask, jsonify\napp = Flask(__name__)\napp.debug = True\nfrom datetime import timedelta\nfrom flask import make_response, request, current_app, render_template\nfrom functools import update_wrapper\nimport json\n\nfrom subprocess import * \n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'] = \\\n \"Origin, X-Requested-With, Content-Type, Accept, Authorization\"\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\ndef getBzResult(search_str):\n ans_list = get_search_res(\"bugzilla\", \"text\", search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n #raise Exception('xyz')\n return ans_list\n\ndef getIkbResult(search_str):\n ans_list = get_search_res(\"ikb\", \"kb\", search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\\\".*%s.*\\\"}}}' %(query)\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' %(index, doc_type)\n child = Popen([\"curl\", es_url, \"-d\", str(search_dsl).lower().encode('string-escape')], stdout=PIPE) \n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {} \n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n #sorted to get the latest item\n #newlist = list(reversed(sorted(ans_list, key=lambda k: k['id'])))\n \n return ans_list\n\n@app.route(\"/regexSearch\")\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n #raise Exception('xyz')\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results = results)\n\n return render_template('search_result.html', results = results)\n\n@app.route(\"/DefaultError\")\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5555)\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
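The record above wraps Flask views in a crossdomain decorator that stamps CORS headers onto every response before querying Elasticsearch with a regexp DSL. A minimal sketch of that decorator pattern on its own, assuming only that Flask is installed; the route name and origin below are illustrative:

from functools import update_wrapper
from flask import Flask, make_response

app = Flask(__name__)

def crossdomain(origin):
    # Attach an Access-Control-Allow-Origin header to every response
    # produced by the wrapped view.
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            resp = make_response(f(*args, **kwargs))
            resp.headers['Access-Control-Allow-Origin'] = origin
            return resp
        return update_wrapper(wrapped_function, f)
    return decorator

@app.route('/ping')
@crossdomain(origin='*')
def ping():
    return 'pong'

update_wrapper keeps the wrapped view's original name, which matters because Flask registers routes by the function's endpoint name.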
<|reserved_special_token_0|>
class Comic(models.Model):
MAX_PAGES_PER_ISSUE = 1000
sort_number = models.IntegerField(blank=True, null=True)
page_number = models.IntegerField(blank=True, null=True)
last_page = models.IntegerField(default=1)
title = models.CharField(max_length=200, blank=True, null=True)
issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=
models.DO_NOTHING)
image = models.ImageField(upload_to='comics', blank=True, null=True)
date_added = models.DateTimeField(help_text='Posted on: ', default=
timezone.now, null=True, blank=True)
cast_members = models.ManyToManyField(Cast, related_name='comics',
blank=True)
class Meta:
ordering = ['-sort_number', '-date_added']
def __str__(self):
return self.title
@staticmethod
def sortOrder(page_number):
if int(page_number) < 33:
issue_num = 1
else:
issue_num = 2
order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)
return order
def save(self, *args, **kwargs):
self.sort_number = Comic.sortOrder(self.page_number)
super(Comic, self).save(*args, **kwargs)
class ComicManager(models.Model):
last_page = models.IntegerField(default=1)
class Meta:
verbose_name_plural = 'Comic Manager'
def __str__(self):
return str(self.last_page)
def save(self, *args, **kwargs):
super(ComicManager, self).save(*args, **kwargs)
comics = Comic.objects.all()
for comic in comics:
if comic.last_page < self.last_page:
comic.last_page = self.last_page
comic.save()
class HeaderImage(models.Model):
title = models.CharField(max_length=100, blank=True, null=True)
image = models.ImageField(upload_to='images', blank=True, null=True)
class Meta:
verbose_name_plural = 'Header Images'
def __str__(self):
return self.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Issue(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Comic(models.Model):
MAX_PAGES_PER_ISSUE = 1000
sort_number = models.IntegerField(blank=True, null=True)
page_number = models.IntegerField(blank=True, null=True)
last_page = models.IntegerField(default=1)
title = models.CharField(max_length=200, blank=True, null=True)
issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=
models.DO_NOTHING)
image = models.ImageField(upload_to='comics', blank=True, null=True)
date_added = models.DateTimeField(help_text='Posted on: ', default=
timezone.now, null=True, blank=True)
cast_members = models.ManyToManyField(Cast, related_name='comics',
blank=True)
class Meta:
ordering = ['-sort_number', '-date_added']
def __str__(self):
return self.title
@staticmethod
def sortOrder(page_number):
if int(page_number) < 33:
issue_num = 1
else:
issue_num = 2
order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)
return order
def save(self, *args, **kwargs):
self.sort_number = Comic.sortOrder(self.page_number)
super(Comic, self).save(*args, **kwargs)
class ComicManager(models.Model):
last_page = models.IntegerField(default=1)
class Meta:
verbose_name_plural = 'Comic Manager'
def __str__(self):
return str(self.last_page)
def save(self, *args, **kwargs):
super(ComicManager, self).save(*args, **kwargs)
comics = Comic.objects.all()
for comic in comics:
if comic.last_page < self.last_page:
comic.last_page = self.last_page
comic.save()
class HeaderImage(models.Model):
title = models.CharField(max_length=100, blank=True, null=True)
image = models.ImageField(upload_to='images', blank=True, null=True)
class Meta:
verbose_name_plural = 'Header Images'
def __str__(self):
return self.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Issue(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.title
class Comic(models.Model):
MAX_PAGES_PER_ISSUE = 1000
sort_number = models.IntegerField(blank=True, null=True)
page_number = models.IntegerField(blank=True, null=True)
last_page = models.IntegerField(default=1)
title = models.CharField(max_length=200, blank=True, null=True)
issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=
models.DO_NOTHING)
image = models.ImageField(upload_to='comics', blank=True, null=True)
date_added = models.DateTimeField(help_text='Posted on: ', default=
timezone.now, null=True, blank=True)
cast_members = models.ManyToManyField(Cast, related_name='comics',
blank=True)
class Meta:
ordering = ['-sort_number', '-date_added']
def __str__(self):
return self.title
@staticmethod
def sortOrder(page_number):
if int(page_number) < 33:
issue_num = 1
else:
issue_num = 2
order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)
return order
def save(self, *args, **kwargs):
self.sort_number = Comic.sortOrder(self.page_number)
super(Comic, self).save(*args, **kwargs)
class ComicManager(models.Model):
last_page = models.IntegerField(default=1)
class Meta:
verbose_name_plural = 'Comic Manager'
def __str__(self):
return str(self.last_page)
def save(self, *args, **kwargs):
super(ComicManager, self).save(*args, **kwargs)
comics = Comic.objects.all()
for comic in comics:
if comic.last_page < self.last_page:
comic.last_page = self.last_page
comic.save()
class HeaderImage(models.Model):
title = models.CharField(max_length=100, blank=True, null=True)
image = models.ImageField(upload_to='images', blank=True, null=True)
class Meta:
verbose_name_plural = 'Header Images'
def __str__(self):
return self.title
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from pprint import pprint
class Cast(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
image = models.ImageField(upload_to='cast', blank=True, null=True)
description = models.CharField(max_length=400, blank=True, null=True)
def __str__(self):
return self.name
class Issue(models.Model):
title = models.CharField(max_length=200, blank=True, null=True)
image = models.ImageField(upload_to='issues', blank=True, null=True)
issue_number = models.IntegerField(blank=True, null=True)
def __str__(self):
return self.title
class Comic(models.Model):
MAX_PAGES_PER_ISSUE = 1000
sort_number = models.IntegerField(blank=True, null=True)
page_number = models.IntegerField(blank=True, null=True)
last_page = models.IntegerField(default=1)
title = models.CharField(max_length=200, blank=True, null=True)
issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=
models.DO_NOTHING)
image = models.ImageField(upload_to='comics', blank=True, null=True)
date_added = models.DateTimeField(help_text='Posted on: ', default=
timezone.now, null=True, blank=True)
cast_members = models.ManyToManyField(Cast, related_name='comics',
blank=True)
class Meta:
ordering = ['-sort_number', '-date_added']
def __str__(self):
return self.title
@staticmethod
def sortOrder(page_number):
if int(page_number) < 33:
issue_num = 1
else:
issue_num = 2
order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)
return order
def save(self, *args, **kwargs):
self.sort_number = Comic.sortOrder(self.page_number)
super(Comic, self).save(*args, **kwargs)
class ComicManager(models.Model):
last_page = models.IntegerField(default=1)
class Meta:
verbose_name_plural = 'Comic Manager'
def __str__(self):
return str(self.last_page)
def save(self, *args, **kwargs):
super(ComicManager, self).save(*args, **kwargs)
comics = Comic.objects.all()
for comic in comics:
if comic.last_page < self.last_page:
comic.last_page = self.last_page
comic.save()
class HeaderImage(models.Model):
title = models.CharField(max_length=100, blank=True, null=True)
image = models.ImageField(upload_to='images', blank=True, null=True)
class Meta:
verbose_name_plural = 'Header Images'
def __str__(self):
return self.title
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from pprint import pprint
class Cast(models.Model):
name = models.CharField(max_length=50, blank=True, null=True)
image = models.ImageField(upload_to='cast', blank=True, null=True)
description = models.CharField(max_length=400, blank=True, null=True)
def __str__(self):
return self.name
class Issue(models.Model):
title = models.CharField(max_length=200, blank=True, null=True)
image = models.ImageField(upload_to='issues', blank=True, null=True)
issue_number = models.IntegerField(blank=True, null=True)
def __str__(self):
return self.title
class Comic(models.Model):
MAX_PAGES_PER_ISSUE = 1000
sort_number = models.IntegerField(blank=True, null=True)
page_number = models.IntegerField(blank=True, null=True )
last_page = models.IntegerField(default=1)
title = models.CharField(max_length=200, blank=True, null=True)
issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=models.DO_NOTHING)
image = models.ImageField(upload_to='comics', blank=True, null=True)
date_added = models.DateTimeField(
help_text="Posted on: ",
default = timezone.now, null=True, blank=True
)
cast_members = models.ManyToManyField(Cast, related_name="comics", blank=True)
class Meta:
ordering = ['-sort_number', '-date_added']
def __str__(self):
return self.title
@staticmethod
def sortOrder(page_number):
# TODO: ADD ISSUE 3 LOGIC WHEN WE GET THERE
if int(page_number) < 33:
issue_num = 1
else:
issue_num = 2
# print('ISSUE NUM: ', issue_num)
order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)
# print ('SORT ORDER: ', order)
return order
def save(self, *args, **kwargs):
self.sort_number = Comic.sortOrder(self.page_number)
super(Comic, self).save(*args, **kwargs) # Call the "real" save() method.
class ComicManager(models.Model):
last_page = models.IntegerField(default=1)
class Meta:
verbose_name_plural = ("Comic Manager")
def __str__(self):
return str(self.last_page)
def save(self, *args, **kwargs):
super(ComicManager, self).save(*args, **kwargs)
# TODO - automate this so that anytime a comic is saved it checks last page status and runs here
# update all Comic instances to have this last page
comics = Comic.objects.all()
for comic in comics:
if comic.last_page < self.last_page:
comic.last_page = self.last_page
comic.save()
class HeaderImage(models.Model):
title = models.CharField(max_length=100, blank=True, null=True)
image = models.ImageField(upload_to='images', blank=True, null=True)
class Meta:
verbose_name_plural = ('Header Images')
def __str__(self):
return self.title
|
flexible
|
{
"blob_id": "45dc9d362a2ddfd408f93452bda0b7338057ca81",
"index": 8322,
"step-1": "<mask token>\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-2": "<mask token>\n\n\nclass Issue(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-3": "<mask token>\n\n\nclass Issue(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-4": "from django.db import models\nfrom django.utils import timezone\nfrom pprint import pprint\n\n\nclass Cast(models.Model):\n name = models.CharField(max_length=50, blank=True, null=True)\n image = models.ImageField(upload_to='cast', blank=True, null=True)\n description = models.CharField(max_length=400, blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Issue(models.Model):\n title = models.CharField(max_length=200, blank=True, null=True)\n image = models.ImageField(upload_to='issues', blank=True, null=True)\n issue_number = models.IntegerField(blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\nfrom pprint import pprint\n\nclass Cast(models.Model):\n name = models.CharField(max_length=50, blank=True, null=True)\n image = models.ImageField(upload_to='cast', blank=True, null=True)\n description = models.CharField(max_length=400, blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Issue(models.Model):\n title = models.CharField(max_length=200, blank=True, null=True)\n image = models.ImageField(upload_to='issues', blank=True, null=True)\n issue_number = models.IntegerField(blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True )\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(\n help_text=\"Posted on: \",\n default = timezone.now, null=True, blank=True \n )\n cast_members = models.ManyToManyField(Cast, related_name=\"comics\", blank=True)\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n # TODO: ADD ISSUE 3 LOGIC WHEN WE GET THERE\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n # print('ISSUE NUM: ', issue_num)\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number) \n # print ('SORT ORDER: ', order)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs) # Call the \"real\" save() method.\n\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n class Meta:\n verbose_name_plural = (\"Comic Manager\")\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n # TODO - automate this so that anytime a comic is saved it checks last page status and runs here\n # update all Comic instances to have this last page\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n class Meta: \n verbose_name_plural = ('Header Images')\n\n def __str__(self):\n return self.title\n\n",
"step-ids": [
12,
13,
14,
19,
20
]
}
|
[
12,
13,
14,
19,
20
] |
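The sortOrder arithmetic in the record above folds issue and page into a single integer sort key. A quick plain-Python check of that arithmetic, no Django required; the threshold of 33 comes straight from the record:

MAX_PAGES_PER_ISSUE = 1000

def sort_order(page_number):
    # Pages before 33 belong to issue 1, later pages to issue 2,
    # mirroring Comic.sortOrder above.
    issue_num = 1 if int(page_number) < 33 else 2
    return issue_num * MAX_PAGES_PER_ISSUE + int(page_number)

assert sort_order(1) == 1001   # issue 1, page 1
assert sort_order(32) == 1032  # last page counted as issue 1
assert sort_order(33) == 2033  # first page counted as issue 2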
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def download_subreddit(sub):
reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ', client_secret=
'0W_86zufGFCJlSE4lK3CwF_0UEQEQw', username='MarshallBranin',
password='#Marshall2', user_agent=
'macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')
reddit.read_only = True
for submission in praw.reddit.Subreddit(reddit, display_name=f'{sub}').new(
limit=None):
url = str(submission.url)
if url.endswith('jpg') or url.endswith('jpeg') or url.endswith('png'):
urllib.request.urlretrieve(url, 'instagram/INSTAGRAM.jpg')
break
<|reserved_special_token_1|>
import urllib.request
import praw
from praw import reddit
from praw.models.listing.mixins import submission
def download_subreddit(sub):
reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ', client_secret=
'0W_86zufGFCJlSE4lK3CwF_0UEQEQw', username='MarshallBranin',
password='#Marshall2', user_agent=
'macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')
reddit.read_only = True
for submission in praw.reddit.Subreddit(reddit, display_name=f'{sub}').new(
limit=None):
url = str(submission.url)
if url.endswith('jpg') or url.endswith('jpeg') or url.endswith('png'):
urllib.request.urlretrieve(url, 'instagram/INSTAGRAM.jpg')
break
<|reserved_special_token_1|>
import urllib.request
import praw
from praw import reddit
from praw.models.listing.mixins import submission
def download_subreddit(sub):
reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ',
client_secret='0W_86zufGFCJlSE4lK3CwF_0UEQEQw',
username='MarshallBranin',
password='#Marshall2',
user_agent='macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')
reddit.read_only=True
# Iterate through top submissions
for submission in praw.reddit.Subreddit(reddit, display_name=f"{sub}").new(limit=None):
# Get the link of the submission
url = str(submission.url)
# Check if the link is an image
if url.endswith("jpg") or url.endswith("jpeg") or url.endswith("png"):
# Retrieve the image and save it in current folder
urllib.request.urlretrieve(url, "instagram/INSTAGRAM.jpg")
break
|
flexible
|
{
"blob_id": "d19310a45a684a7bbb456555a954439df8ae92b6",
"index": 1392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download_subreddit(sub):\n reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ', client_secret=\n '0W_86zufGFCJlSE4lK3CwF_0UEQEQw', username='MarshallBranin',\n password='#Marshall2', user_agent=\n 'macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')\n reddit.read_only = True\n for submission in praw.reddit.Subreddit(reddit, display_name=f'{sub}').new(\n limit=None):\n url = str(submission.url)\n if url.endswith('jpg') or url.endswith('jpeg') or url.endswith('png'):\n urllib.request.urlretrieve(url, 'instagram/INSTAGRAM.jpg')\n break\n",
"step-3": "import urllib.request\nimport praw\nfrom praw import reddit\nfrom praw.models.listing.mixins import submission\n\n\ndef download_subreddit(sub):\n reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ', client_secret=\n '0W_86zufGFCJlSE4lK3CwF_0UEQEQw', username='MarshallBranin',\n password='#Marshall2', user_agent=\n 'macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')\n reddit.read_only = True\n for submission in praw.reddit.Subreddit(reddit, display_name=f'{sub}').new(\n limit=None):\n url = str(submission.url)\n if url.endswith('jpg') or url.endswith('jpeg') or url.endswith('png'):\n urllib.request.urlretrieve(url, 'instagram/INSTAGRAM.jpg')\n break\n",
"step-4": "import urllib.request\nimport praw\nfrom praw import reddit\nfrom praw.models.listing.mixins import submission\n\n\ndef download_subreddit(sub):\n reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ',\n client_secret='0W_86zufGFCJlSE4lK3CwF_0UEQEQw',\n username='MarshallBranin',\n password='#Marshall2',\n user_agent='macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)') \n \n reddit.read_only=True\n\n # Iterate through top submissions\n for submission in praw.reddit.Subreddit(reddit, display_name=f\"{sub}\").new(limit=None):\n\n # Get the link of the submission\n url = str(submission.url)\n\n # Check if the link is an image\n if url.endswith(\"jpg\") or url.endswith(\"jpeg\") or url.endswith(\"png\"):\n\n # Retrieve the image and save it in current folder\n urllib.request.urlretrieve(url, \"instagram/INSTAGRAM.jpg\")\n break\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
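The record above hardcodes Reddit credentials in source. A minimal sketch of the same first-image download with the credentials read from the environment instead; the environment variable names and output path are illustrative, and it assumes praw is installed (client id and secret alone are enough for read-only access):

import os
import urllib.request
import praw

def download_first_image(sub, out_path='first_image.jpg'):
    reddit = praw.Reddit(
        client_id=os.environ['REDDIT_CLIENT_ID'],
        client_secret=os.environ['REDDIT_CLIENT_SECRET'],
        user_agent='example-script/1.0')
    reddit.read_only = True
    for submission in reddit.subreddit(sub).new(limit=None):
        url = str(submission.url)
        # Stop at the first submission whose link is a plain image.
        if url.endswith(('.jpg', '.jpeg', '.png')):
            urllib.request.urlretrieve(url, out_path)
            return url
    return None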
import json
import requests as requests
from flask import Flask
from flask import request
from tools import AESCipher, tokenId, TokenKey, appId
from tools import TCApplyNeedleUrl, TCCreditNeedleUrl, TCWJNeedleUrl
app = Flask(__name__)
@app.route('/', methods=['POST'])
def hello_world():
if request.method == "POST":
json_data = request.get_data().decode('utf-8')
_data = json.loads(json_data)
orderNo = _data['orderNo']
name = _data['name']
idcard = _data['idcard']
mobile = _data['mobile']
json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})
param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')), encoding="utf-8")
parameter = ("param=%s" % (param))
parameterXY = ("name=%s,idCard=%s,mobile=%s" % (name, idcard, mobile))
XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY, TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard': idcard,
'mobile': mobile}
        WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter, TCWJNeedleUrl), 'appId': appId, 'param': param}
        ANparams = {'tokenKey': TokenKey.getTokenKey(parameter, TCApplyNeedleUrl), 'appId': appId, 'param': param}
r1 = requests.post(TCCreditNeedleUrl, XYTZparams)
TCdata = r1.text
print(TCdata)
        r2 = requests.post(TCWJNeedleUrl, WJTZparams)
print(r2.text)
rep = json.loads(r2.text)
if rep["status"] == 0:
data = rep["data"]
TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print("TCdata1解密后", TCdata1)
r3 = requests.post(TCApplyNeedleUrl,ANparams)
print(r3.text)
rep = json.loads(r3.text)
if rep["status"] == 0:
data = rep["data"]
TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))
print("TCdata2解密后", TCdata2)
return json.dumps(TCdata2)
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "4652cd5548b550cc21d126fc4fbe3e316ecb71b2",
"index": 143,
"step-1": "<mask token>\n\n\n@app.route('/', methods=['POST'])\ndef hello_world():\n if request.method == 'POST':\n json_data = request.get_data().decode('utf-8')\n _data = json.loads(json_data)\n orderNo = _data['orderNo']\n name = _data['name']\n idcard = _data['idcard']\n mobile = _data['mobile']\n json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})\n param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),\n encoding='utf-8')\n parameter = 'param=%s' % param\n parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)\n XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,\n TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':\n idcard, 'mobile': mobile}\n WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCWJNeedleUrl), 'appId': appId, 'param': param}\n ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCApplyNeedleUrl), 'appId': appId, 'param': param}\n r1 = requests.post(TCCreditNeedleUrl, XYTZparams)\n TCdata = r1.text\n print(TCdata)\n r2 = requests.post(TCWJNeedleUrl, WJTZparams)\n print(r2.text)\n rep = json.loads(r2.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata1解密后', TCdata1)\n r3 = requests.post(TCApplyNeedleUrl, ANparams)\n print(r3.text)\n rep = json.loads(r3.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata2解密后', TCdata2)\n return json.dumps(TCdata2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/', methods=['POST'])\ndef hello_world():\n if request.method == 'POST':\n json_data = request.get_data().decode('utf-8')\n _data = json.loads(json_data)\n orderNo = _data['orderNo']\n name = _data['name']\n idcard = _data['idcard']\n mobile = _data['mobile']\n json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})\n param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),\n encoding='utf-8')\n parameter = 'param=%s' % param\n parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)\n XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,\n TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':\n idcard, 'mobile': mobile}\n WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCWJNeedleUrl), 'appId': appId, 'param': param}\n ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCApplyNeedleUrl), 'appId': appId, 'param': param}\n r1 = requests.post(TCCreditNeedleUrl, XYTZparams)\n TCdata = r1.text\n print(TCdata)\n r2 = requests.post(TCWJNeedleUrl, WJTZparams)\n print(r2.text)\n rep = json.loads(r2.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata1解密后', TCdata1)\n r3 = requests.post(TCApplyNeedleUrl, ANparams)\n print(r3.text)\n rep = json.loads(r3.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata2解密后', TCdata2)\n return json.dumps(TCdata2)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/', methods=['POST'])\ndef hello_world():\n if request.method == 'POST':\n json_data = request.get_data().decode('utf-8')\n _data = json.loads(json_data)\n orderNo = _data['orderNo']\n name = _data['name']\n idcard = _data['idcard']\n mobile = _data['mobile']\n json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})\n param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),\n encoding='utf-8')\n parameter = 'param=%s' % param\n parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)\n XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,\n TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':\n idcard, 'mobile': mobile}\n WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCWJNeedleUrl), 'appId': appId, 'param': param}\n ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCApplyNeedleUrl), 'appId': appId, 'param': param}\n r1 = requests.post(TCCreditNeedleUrl, XYTZparams)\n TCdata = r1.text\n print(TCdata)\n r2 = requests.post(TCWJNeedleUrl, WJTZparams)\n print(r2.text)\n rep = json.loads(r2.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata1解密后', TCdata1)\n r3 = requests.post(TCApplyNeedleUrl, ANparams)\n print(r3.text)\n rep = json.loads(r3.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata2解密后', TCdata2)\n return json.dumps(TCdata2)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "import json\nimport requests as requests\nfrom flask import Flask\nfrom flask import request\nfrom tools import AESCipher, tokenId, TokenKey, appId\nfrom tools import TCApplyNeedleUrl, TCCreditNeedleUrl, TCWJNeedleUrl\napp = Flask(__name__)\n\n\n@app.route('/', methods=['POST'])\ndef hello_world():\n if request.method == 'POST':\n json_data = request.get_data().decode('utf-8')\n _data = json.loads(json_data)\n orderNo = _data['orderNo']\n name = _data['name']\n idcard = _data['idcard']\n mobile = _data['mobile']\n json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})\n param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')),\n encoding='utf-8')\n parameter = 'param=%s' % param\n parameterXY = 'name=%s,idCard=%s,mobile=%s' % (name, idcard, mobile)\n XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY,\n TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard':\n idcard, 'mobile': mobile}\n WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCWJNeedleUrl), 'appId': appId, 'param': param}\n ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,\n TCApplyNeedleUrl), 'appId': appId, 'param': param}\n r1 = requests.post(TCCreditNeedleUrl, XYTZparams)\n TCdata = r1.text\n print(TCdata)\n r2 = requests.post(TCWJNeedleUrl, WJTZparams)\n print(r2.text)\n rep = json.loads(r2.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata1解密后', TCdata1)\n r3 = requests.post(TCApplyNeedleUrl, ANparams)\n print(r3.text)\n rep = json.loads(r3.text)\n if rep['status'] == 0:\n data = rep['data']\n TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))\n print('TCdata2解密后', TCdata2)\n return json.dumps(TCdata2)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "import json\r\n\r\nimport requests as requests\r\nfrom flask import Flask\r\nfrom flask import request\r\n\r\nfrom tools import AESCipher, tokenId, TokenKey, appId\r\nfrom tools import TCApplyNeedleUrl, TCCreditNeedleUrl, TCWJNeedleUrl\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/', methods=['POST'])\r\ndef hello_world():\r\n if request.method == \"POST\":\r\n json_data = request.get_data().decode('utf-8')\r\n _data = json.loads(json_data)\r\n orderNo = _data['orderNo']\r\n name = _data['name']\r\n idcard = _data['idcard']\r\n mobile = _data['mobile']\r\n json1 = json.dumps({'name': name, 'idcard': idcard, 'mobile': mobile})\r\n param = str(AESCipher.encrypt(json1, tokenId.replace('-', '')), encoding=\"utf-8\")\r\n parameter = (\"param=%s\" % (param))\r\n parameterXY = (\"name=%s,idCard=%s,mobile=%s\" % (name, idcard, mobile))\r\n XYTZparams = {'tokenKey': TokenKey.getTokenKey(parameterXY, TCCreditNeedleUrl), 'appId': appId, 'name': name, 'idCard': idcard,\r\n 'mobile': mobile}\r\n WJTZparams = {'tokenKey': TokenKey.getTokenKey(parameter,TCWJNeedleUrl), 'appId': appId, 'param': param}\r\n ANparams = {'tokenKey': TokenKey.getTokenKey(parameter,TCApplyNeedleUrl), 'appId': appId, 'param': param}\r\n r1 = requests.post(TCCreditNeedleUrl, XYTZparams)\r\n TCdata = r1.text\r\n print(TCdata)\r\n\r\n r2 = requests.post(TCWJNeedleUrl,WJTZparams)\r\n print(r2.text)\r\n rep = json.loads(r2.text)\r\n if rep[\"status\"] == 0:\r\n data = rep[\"data\"]\r\n TCdata1 = AESCipher.decode_data(data, tokenId.replace('-', ''))\r\n print(\"TCdata1解密后\", TCdata1)\r\n\r\n r3 = requests.post(TCApplyNeedleUrl,ANparams)\r\n print(r3.text)\r\n rep = json.loads(r3.text)\r\n if rep[\"status\"] == 0:\r\n data = rep[\"data\"]\r\n TCdata2 = AESCipher.decode_data(data, tokenId.replace('-', ''))\r\n print(\"TCdata2解密后\", TCdata2)\r\n\r\n\r\n return json.dumps(TCdata2)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
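The record above relays one POST body through three upstream endpoints and treats status 0 in the JSON reply as success. A minimal sketch of that relay shape using requests alone; the AES and tokenKey helpers live in the record's local tools module and are not reproduced here, so the function below is an illustration, not the record's API:

import json
import requests

def relay(url, params):
    # POST the signed parameters and unwrap the JSON envelope;
    # the upstream marks success with status == 0.
    resp = requests.post(url, data=params)
    body = json.loads(resp.text)
    if body.get('status') == 0:
        return body.get('data')
    return None

Each of the three calls in the record follows this shape, differing only in the endpoint and the token it signs.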
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def countBits(self, num: int) ->List[int]:
total = []
for i in range(num + 1):
counter = bin(i).count('1')
total.append(counter)
return total
<|reserved_special_token_1|>
class Solution:
def countBits(self, num: int) -> List[int]:
total = []
for i in range(num + 1):
counter = bin(i).count('1')
# for j in bin(i):
# if j == '1':
# counter += 1
total.append(counter)
return total
# bin(i).count('1') is the easy way to do it with built in functions
# for loop to search each char in the returned string is slower
|
flexible
|
{
"blob_id": "c6554ff18c23a61d3694e73b808f44c96f9a19c4",
"index": 2012,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def countBits(self, num: int) ->List[int]:\n total = []\n for i in range(num + 1):\n counter = bin(i).count('1')\n total.append(counter)\n return total\n",
"step-4": "class Solution:\n def countBits(self, num: int) -> List[int]:\n total = []\n for i in range(num + 1):\n counter = bin(i).count('1')\n # for j in bin(i):\n # if j == '1':\n # counter += 1\n total.append(counter)\n \n return total\n \n # bin(i).count('1') is the easy way to do it with built in functions\n # for loop to search each char in the returned string is slower\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
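The closing comments in the record above note that bin(i).count('1') beats scanning the string by hand. A common O(n) alternative derives each population count from one already computed, so no string conversion is needed at all; minimal sketch:

def count_bits(num):
    counts = [0] * (num + 1)
    for i in range(1, num + 1):
        # i >> 1 drops the lowest bit; i & 1 adds it back if it was set.
        counts[i] = counts[i >> 1] + (i & 1)
    return counts

assert count_bits(5) == [0, 1, 1, 2, 1, 2]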
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2
def signif_conf(ts, p):
    ''' Given a timeseries (ts), and desired probability (p),
    compute the standard deviation of ts (s) and use the
    number of points in the ts (N), and the degrees of freedom (DOF)
    to calculate chi. '''
    s = np.std(ts)
    N = ts.size
    DOF = 2
    # scipy's chi-squared inverse survival function stands in for the
    # undefined chi_sqr helper the original called here
    chi = chi2.isf(1. - p, DOF)
    signif = ((s**2)*chi) / ((N/2)*DOF)
    return signif
def fourier2(Flux, Delt,
             pad=None, rad=None, norm=None, signif=0.95, display=None):
    ''' Subtract the mean '''
    Flux = np.array(Flux, dtype=float)
    newflux = Flux - Flux.mean()
    N = newflux.size
    ''' Start padding if keyword was specified '''
    if pad:
        base2 = int(np.log(N)/np.log(2)) + 1
        if N != 2**(base2 - 1):
            n_zeros = 2**base2 - N
            newflux = np.append(newflux, np.zeros(n_zeros))
            N = newflux.size
            print("Padded " + str(N) + " data points with " +
                  str(n_zeros) + " zeros.")
            print("**RECOMMEND checking against fourier spectrum of non padded "
                  "time series**")
    ''' make the frequency array '''
    Freq = np.arange(N//2 + 1) / (N*Delt)
    ''' Calculate the (forward) FFT of the form a(w) + ib(w) '''
    V = np.fft.fft(newflux)
    ''' Calculate the power and amplitude '''
    Power = 2*(abs(V)**2)
    Amplitude = 2*(abs(V))
    ''' Since we are taking the FFT of a real time series, (not complex), the
    second half is a duplicate, so it can be removed.
    Also do not use the zero-eth element because it will just be equal to the
    mean, which has been set to zero anyway '''
    Freq = Freq[1:N//2]
    Power = Power[1:N//2]
    Amplitude = Amplitude[1:N//2]
    '''
    By Parseval's Theorem, the variance of a time series should be equal to the total
    of its power spectrum (this is just conservation of energy). Check that you
    have the correct normalization for your Power Spectrum by comparing the total
    of your spectrum (with N/2 points) with the variance
    print('Variance of time series = ' + str(newflux.var()))
    print('Total of Power Spectrum = ' + str(np.sum(Power)))
    '''
    ''' Get real and imaginary parts of V '''
    imag = V.imag[1:N//2]
    real = V.real[1:N//2]
    ''' Calculate the phase for each frequency.
    In simple terms this is just arctan(y/x), since tan(phase)=y/x.
    Gives phase in radians between -pi and pi, and converts to degrees
    by default '''
    if rad:
        Phase = np.arctan2(imag, real)
    else:
        Phase = np.degrees(np.arctan2(imag, real))
    '''
    sig_lvl = 0.
    if signif:
        conf = signif
    else:
        conf = 0.95
    sig_lvl = signif_conf(newflux, conf)
    '''
    conf = signif  # The variable conf seems redundant...
    sig_lvl = signif_conf(newflux, signif)
    if norm:
        var = np.var(newflux)
        Power = Power * (N/var)
        sig_lvl = sig_lvl * (N/var)
        print("White noise has an expectation value of 1")
    if display:
        if sig_lvl != 0:
            print("Confidence level at " + str(int(conf*100)) +
                  " is: " + str(sig_lvl))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(Freq, Power)
        ax.plot(Freq, Power, '.')
        ax.axhline(sig_lvl)  # horizontal line marking the significance level
        plt.show()
    '''
    The final output is an array containing the power and phase at each frequency
    '''
    Result = np.zeros((Power.size, 4))
    Result[:, 0] = Freq
    Result[:, 1] = Power
    Result[:, 2] = Phase
    Result[:, 3] = Amplitude
    print("Result[:,0] is frequency")
    print("Result[:,1] is the power spectrum")
    print("Result[:,2] is the phase")
    return Result
f = np.array([1, 3, 4, 5, 3, 2, 6, 4, 3, 4, 1])
blah = fourier2(f, 1)
|
normal
|
{
"blob_id": "84a4a0a16aea08ee874b09de163fd777be925f18",
"index": 3041,
"step-1": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef signif_conf(ts, p):\n ''' Given a timeseries (ts), and desired probability (p),\n compute the standard deviation of ts (s) and use the\n number of points in the ts (N), and the degrees of freedom (DOF)\n to calculate chi. '''\n s = np.std(ts)\n N = ts.size\n DOF = 2\n\n # chi = chi_sqr(1.-p, DOF)\n chi = chi_sqr(1.-p, DOF)\n\n signif = ((s**2)*chi) / ((N/2)*DOF)\n return signif\n\n\ndef fourier2(Flux, Delt,\n pad=None, rad=None, norm=None, signif=0.95, display=None):\n\n ''' Subtract the mean '''\n Flux = np.array(Flux)\n newflux = np.array(Flux) - np.array(Flux).mean()\n N = newflux.size\n\n ''' Start padding if keyword was specified '''\n if pad:\n base2 = int(np.log(N)/np.log(2)) + 1\n if (N != 2.**(base2-1)):\n np.append(newflux, np.array(long(2)**base2-N, dtype=float))\n N = newflux.size\n print (\"Padded \" + str(N) + \" data points with \" +\n str(long(2)**(base2) - N) + \" zeros.\")\n print (\"**RECOMMEND checking against fourier spectrum of non padded \"\n \"time series**\")\n\n ''' make the frequency array '''\n Freq = np.arange((N/2)+1) / (N*Delt)\n\n ''' Calculate the (forward) FFT of the form a(w) + ib(w) '''\n V = np.fft.fft(newflux)\n\n ''' Calculate the power and amplitude '''\n Power = 2*(abs(V)**2)\n Amplitude = 2*(abs(V))\n\n ''' Since we are taking the FFT of a real time series, (not complex), the\n second half is a duplicate, so it can be removed.\n Also do not use the zero-eth element becuase it will just be equal to the\n mean, which has been set to zero anyway '''\n Freq = (Freq[1:]).flatten\n Power = (Power[1:N/2]).flatten\n Amplitude = (Amplitude[1:N/2]).flatten\n\n '''\n By Parseval's Theorem, the variance of a time series should be equal to the total\n of its power spectrum (this is just conservation of energy). Check that you\n have the correct normalization for your Power Spectrum by comparing the total\n of your spectrum (with N/2 points) with the variance\n print 'Variance of time series = ' + str(newflux.var)\n print 'Total of Power Spectrum = ' + str(np.sum(Power))\n '''\n\n ''' Get real and imaginary parts of V '''\n imag = (V.imag)[1:N/2]\n amp = (V.real)[1:N/2]\n\n ''' Calculate the the phase for each frequency.\n In simple terms this is just arctan(y/x), since tan(phase)=y/x.\n Gives phase in radians between -pi and pi, and converts to degrees\n by default'''\n if rad:\n Phase = np.arctan2(amp, imag)\n else:\n Phase = np.degrees(np.arctan2(amp, imag))\n\n '''\n sig_lvl = 0.\n if signif:\n conf = signif\n else:\n conf = 0.95\n sig_lvl = signif_conf(newflux, conf)\n '''\n conf = signif # The variable conf seems redundant...\n sig_lvl = signif_conf(newflux, signif)\n\n if norm:\n var = np.var(newflux)\n power = power * (N/var)\n sig_lvl = sig_lvl * (N/var)\n print \"White noise has an expectation value of 1\"\n\n if display:\n if sig_lvl != 0:\n print (\"Confidence level at \" + str(int(conf*100)) +\n \" is: \" + str(sig_lvl))\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(Freq, Power)\n ax.plot(Freq, Power, '.')\n # horline, sig_lvl .... 
?\n plt.show()\n\n '''\n The final output is an array containing the power and phase at each frequency\n '''\n Result = np.zeros(Power.size, 4)\n Result[:,0] = Freq\n Result[:,1] = Power\n Result[:,2] = Phase\n Result[:,3] = Amplitude\n print \"Result[:,0] is frequency\"\n print \"Result[:,1] is the power spectrum\"\n print \"Result[:,2] is the phase\"\n\n return Result\n\nf = np.array([1, 3, 4, 5, 3, 2, 6, 4, 3, 4, 1])\nblah = fourier2(f, 1)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
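The fourier2 routine above hand-builds a one-sided spectrum from np.fft.fft. numpy's rfft does the one-sided bookkeeping directly; a compact sketch of the same power and phase core, where the 2|V|^2 normalization mirrors the record's choice and everything else is an assumption of this sketch:

import numpy as np

def power_spectrum(flux, delt):
    flux = np.asarray(flux, dtype=float)
    flux = flux - flux.mean()              # remove the mean first
    n = flux.size
    freq = np.fft.rfftfreq(n, d=delt)      # one-sided frequency axis
    v = np.fft.rfft(flux)
    power = 2.0 * np.abs(v)**2
    phase = np.degrees(np.arctan2(v.imag, v.real))
    return freq[1:], power[1:], phase[1:]  # drop the zero-frequency bin

freq, power, phase = power_spectrum([1, 3, 4, 5, 3, 2, 6, 4, 3, 4, 1], 1)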
<|reserved_special_token_0|>
class Client(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client(object):
<|reserved_special_token_0|>
def __init__(self, app):
self.app = app
<|reserved_special_token_0|>
def get(self, path=None):
return self.request(path, 'GET')
<|reserved_special_token_0|>
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client(object):
<|reserved_special_token_0|>
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
<|reserved_special_token_0|>
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client(object):
"""Make requests to a wsgi app and return the response."""
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
def post(self, path=None, body=None):
return self.request(path, 'POST', body)
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(status_code=200, content_type=request.content_type or
'text/plain', charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
<|reserved_special_token_1|>
"""Woma objects for dealing with HTTP.
Request and Response inherit from webob's Request and Response objects, so see
http://docs.webob.org/en/latest/ for full documentation. The only things
documented here are the customizations.
"""
from webob import Request as BaseRequest
from webob import Response as BaseResponse
class Client(object):
"""Make requests to a wsgi app and return the response."""
def __init__(self, app):
self.app = app
def request(self, path, method, body=None):
path = path or '/'
request = BaseRequest.blank(path)
request.method = method
request.text = body or ''
return request.get_response(self.app)
def get(self, path=None):
return self.request(path, 'GET')
def post(self, path=None, body=None):
return self.request(path, 'POST', body)
def put(self, path=None, body=None):
return self.request(path, 'PUT', body)
class Request(BaseRequest):
"""A webob.Request with additional properties."""
@property
def kwargs(self):
"""Returns 'router.kwargs' from environ if present, or {} otherwise."""
return self.environ.get('router.kwargs', {})
class Response(BaseResponse):
"""A webob.Response that can be initialized with defaults from request."""
@classmethod
def for_request(cls, request):
"""Initialize a Response with defaults based on the request.
>>> request = Request({})
>>> request.headers['Content-Type'] = 'text/html; charset=latin1'
>>> response = Response.for_request(request)
>>> response.content_type
'text/html'
>>> response.charset
'latin1'
"""
return cls(
status_code=200,
content_type=request.content_type or 'text/plain',
charset=request.charset or 'UTF-8')
def write(self, text):
"""An alias for `response.text = text`.
>>> response = Response()
>>> response.write('some text')
>>> response.text
'some text'
"""
self.text = text
|
flexible
|
{
"blob_id": "ca11e9cf0bcfcbd714c45b5c95bd2c2044b65909",
"index": 384,
"step-1": "<mask token>\n\n\nclass Client(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-2": "<mask token>\n\n\nclass Client(object):\n <mask token>\n\n def __init__(self, app):\n self.app = app\n <mask token>\n\n def get(self, path=None):\n return self.request(path, 'GET')\n <mask token>\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-3": "<mask token>\n\n\nclass Client(object):\n <mask token>\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n <mask token>\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-4": "<mask token>\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(status_code=200, content_type=request.content_type or\n 'text/plain', charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-5": "\"\"\"Woma objects for dealing with HTTP.\n\nRequest and Response inherit from webob's Request and Response objects, so see\nhttp://docs.webob.org/en/latest/ for full documentation. The only things\ndocumented here are the customizations.\n\n\"\"\"\nfrom webob import Request as BaseRequest\nfrom webob import Response as BaseResponse\n\n\nclass Client(object):\n \"\"\"Make requests to a wsgi app and return the response.\"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def request(self, path, method, body=None):\n path = path or '/'\n request = BaseRequest.blank(path)\n request.method = method\n request.text = body or ''\n return request.get_response(self.app)\n\n def get(self, path=None):\n return self.request(path, 'GET')\n\n def post(self, path=None, body=None):\n return self.request(path, 'POST', body)\n\n def put(self, path=None, body=None):\n return self.request(path, 'PUT', body)\n\n\nclass Request(BaseRequest):\n \"\"\"A webob.Request with additional properties.\"\"\"\n\n @property\n def kwargs(self):\n \"\"\"Returns 'router.kwargs' from environ if present, or {} otherwise.\"\"\"\n return self.environ.get('router.kwargs', {})\n\n\nclass Response(BaseResponse):\n \"\"\"A webob.Response that can be initialized with defaults from request.\"\"\"\n\n @classmethod\n def for_request(cls, request):\n \"\"\"Initialize a Response with defaults based on the request.\n\n >>> request = Request({})\n >>> request.headers['Content-Type'] = 'text/html; charset=latin1'\n\n >>> response = Response.for_request(request)\n >>> response.content_type\n 'text/html'\n >>> response.charset\n 'latin1'\n\n \"\"\"\n return cls(\n status_code=200,\n content_type=request.content_type or 'text/plain',\n charset=request.charset or 'UTF-8')\n\n def write(self, text):\n \"\"\"An alias for `response.text = text`.\n\n >>> response = Response()\n >>> response.write('some text')\n >>> response.text\n 'some text'\n\n \"\"\"\n self.text = text\n",
"step-ids": [
8,
11,
12,
14,
16
]
}
|
[
8,
11,
12,
14,
16
] |
import pygame
naytto = pygame.display.set_mode((740, 500))
pygame.display.set_caption("Piirtäminen")
x = 100
y = 300
def main():
    # Load the sprite once, before the loop: reloading the image from disk
    # on every frame is needlessly slow.
    kuva = pygame.image.load("mycat.png").convert()
    while True:
        tapahtuma = pygame.event.poll()
        if tapahtuma.type == pygame.QUIT:
            break
        naytto.fill((0, 0, 0))
        pygame.draw.line(naytto, (0, 0, 255), (0, 0), (640, 400))
        pygame.draw.line(naytto, (0, 255, 0), (640, 0), (0, 400))
        pygame.draw.rect(naytto, (255, 0, 0), (100, 50, 150, 200))
        pygame.draw.circle(naytto, (255, 255, 0), (350, 150), 40)
        naytto.blit(kuva, (x, y))
        pygame.display.flip()

main()
pygame.quit()
|
normal
|
{
"blob_id": "3fdb29797894737edae37ad7890e14cb9ce705e8",
"index": 5901,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npygame.display.set_caption('Piirtäminen')\n<mask token>\n\n\ndef main():\n while True:\n tapahtuma = pygame.event.poll()\n if tapahtuma.type == pygame.QUIT:\n break\n naytto.fill((0, 0, 0))\n pygame.draw.line(naytto, (0, 0, 255), (0, 0), (640, 400))\n pygame.draw.line(naytto, (0, 255, 0), (640, 0), (0, 400))\n pygame.draw.rect(naytto, (255, 0, 0), (100, 50, 150, 200))\n pygame.draw.circle(naytto, (255, 255, 0), (350, 150), 40)\n kuva = pygame.image.load('mycat.png').convert()\n naytto.blit(kuva, (x, y))\n pygame.display.flip()\n\n\nmain()\n",
"step-3": "<mask token>\nnaytto = pygame.display.set_mode((740, 500))\npygame.display.set_caption('Piirtäminen')\nx = 100\ny = 300\n\n\ndef main():\n while True:\n tapahtuma = pygame.event.poll()\n if tapahtuma.type == pygame.QUIT:\n break\n naytto.fill((0, 0, 0))\n pygame.draw.line(naytto, (0, 0, 255), (0, 0), (640, 400))\n pygame.draw.line(naytto, (0, 255, 0), (640, 0), (0, 400))\n pygame.draw.rect(naytto, (255, 0, 0), (100, 50, 150, 200))\n pygame.draw.circle(naytto, (255, 255, 0), (350, 150), 40)\n kuva = pygame.image.load('mycat.png').convert()\n naytto.blit(kuva, (x, y))\n pygame.display.flip()\n\n\nmain()\n",
"step-4": "import pygame\nnaytto = pygame.display.set_mode((740, 500))\npygame.display.set_caption('Piirtäminen')\nx = 100\ny = 300\n\n\ndef main():\n while True:\n tapahtuma = pygame.event.poll()\n if tapahtuma.type == pygame.QUIT:\n break\n naytto.fill((0, 0, 0))\n pygame.draw.line(naytto, (0, 0, 255), (0, 0), (640, 400))\n pygame.draw.line(naytto, (0, 255, 0), (640, 0), (0, 400))\n pygame.draw.rect(naytto, (255, 0, 0), (100, 50, 150, 200))\n pygame.draw.circle(naytto, (255, 255, 0), (350, 150), 40)\n kuva = pygame.image.load('mycat.png').convert()\n naytto.blit(kuva, (x, y))\n pygame.display.flip()\n\n\nmain()\n",
"step-5": "import pygame\n\nnaytto = pygame.display.set_mode((740, 500))\npygame.display.set_caption(\"Piirtäminen\")\n\nx = 100\ny = 300\n\ndef main():\n while True:\n tapahtuma = pygame.event.poll()\n if tapahtuma.type == pygame.QUIT:\n break\n\n naytto.fill((0, 0, 0))\n pygame.draw.line(naytto, (0, 0, 255), (0, 0), (640, 400))\n \n pygame.draw.line(naytto, (0, 255, 0), (640, 0), (0, 400))\n \n pygame.draw.rect(naytto, (255, 0, 0), (100, 50, 150, 200))\n \n pygame.draw.circle(naytto, (255, 255, 0), (350, 150), 40)\n\n kuva = pygame.image.load(\"mycat.png\").convert()\n naytto.blit(kuva, (x, y))\n pygame.display.flip()\n \n\n\nmain()",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from django.db import models
class Author(models.Model):
author = models.CharField(
"Author",
max_length=30,
blank=False,
null=False
)
biography = models.TextField(
"About author",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.author
class Series(models.Model):
title = models.CharField(
"Title of series",
max_length=100,
blank=False,
null=False
)
description = models.TextField(
"About this series",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.title
class Genre(models.Model):
genre = models.CharField(
"Genre",
max_length=50,
blank=False,
null=False
)
description = models.TextField(
"About this genre",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.genre
class PublishingHouse(models.Model):
house = models.CharField(
"Publishing House",
max_length=40,
blank=False,
null=False
)
history = models.TextField(
"Other books of this house",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.house
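
# A minimal usage sketch (not part of the original file; it assumes these
# models live in a Django app with migrations applied, and is written as an
# illustrative shell session rather than module-level code):
#
#   >>> Author.objects.create(author="Jane Doe", biography="A placeholder bio.")
#   >>> Genre.objects.create(genre="Science fiction")
#   >>> str(Genre.objects.first())
#   'Science fiction'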
|
flexible
|
{
"blob_id": "b34ad8d7fc8df0ab86c5930ab2b5aa1f86d13ae3",
"index": 7580,
"step-1": "<mask token>\n\n\nclass Series(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-2": "<mask token>\n\n\nclass Series(models.Model):\n title = models.CharField('Title of series', max_length=100, blank=False,\n null=False)\n description = models.TextField('About this series', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-3": "<mask token>\n\n\nclass Author(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.author\n\n\nclass Series(models.Model):\n title = models.CharField('Title of series', max_length=100, blank=False,\n null=False)\n description = models.TextField('About this series', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-4": "<mask token>\n\n\nclass Author(models.Model):\n author = models.CharField('Author', max_length=30, blank=False, null=False)\n biography = models.TextField('About author', max_length=500, blank=True,\n null=True)\n\n def __str__(self):\n return self.author\n\n\nclass Series(models.Model):\n title = models.CharField('Title of series', max_length=100, blank=False,\n null=False)\n description = models.TextField('About this series', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-5": "from django.db import models\n\n\nclass Author(models.Model):\n author = models.CharField(\n \"Author\",\n max_length=30,\n blank=False,\n null=False\n )\n\n biography = models.TextField(\n \"About author\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.author\n\n\nclass Series(models.Model):\n title = models.CharField(\n \"Title of series\",\n max_length=100,\n blank=False,\n null=False\n )\n\n description = models.TextField(\n \"About this series\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField(\n \"Genre\",\n max_length=50,\n blank=False,\n null=False\n )\n\n description = models.TextField(\n \"About this genre\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField(\n \"Publishing House\",\n max_length=40,\n blank=False,\n null=False\n\n )\n\n history = models.TextField(\n \"Other books of this house\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.house\n",
"step-ids": [
7,
9,
11,
12,
14
]
}
|
[
7,
9,
11,
12,
14
] |
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from typing import Dict
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_endpoint_util.client import Client as EndpointUtilClient
from alibabacloud_cdt20210813 import models as cdt20210813_models
from alibabacloud_tea_util import models as util_models
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
self.check_config(config)
self._endpoint = self.get_endpoint('cdt', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)
def get_endpoint(
self,
product_id: str,
region_id: str,
endpoint_rule: str,
network: str,
suffix: str,
endpoint_map: Dict[str, str],
endpoint: str,
) -> str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)
def get_cdt_service_status_with_options(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtServiceStatusResponse(),
self.do_rpcrequest('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def get_cdt_service_status_with_options_async(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtServiceStatusResponse(),
await self.do_rpcrequest_async('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def get_cdt_service_status(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_service_status_with_options(request, runtime)
async def get_cdt_service_status_async(
self,
request: cdt20210813_models.GetCdtServiceStatusRequest,
) -> cdt20210813_models.GetCdtServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_service_status_with_options_async(request, runtime)
def open_cdt_service_with_options(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtServiceResponse(),
self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def open_cdt_service_with_options_async(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtServiceResponse(),
await self.do_rpcrequest_async('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def open_cdt_service(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
) -> cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_service_with_options(request, runtime)
async def open_cdt_service_async(
self,
request: cdt20210813_models.OpenCdtServiceRequest,
) -> cdt20210813_models.OpenCdtServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_service_with_options_async(request, runtime)
def get_cdt_cb_service_status_with_options(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtCbServiceStatusResponse(),
self.do_rpcrequest('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def get_cdt_cb_service_status_with_options_async(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.GetCdtCbServiceStatusResponse(),
await self.do_rpcrequest_async('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def get_cdt_cb_service_status(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return self.get_cdt_cb_service_status_with_options(request, runtime)
async def get_cdt_cb_service_status_async(
self,
request: cdt20210813_models.GetCdtCbServiceStatusRequest,
) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
runtime = util_models.RuntimeOptions()
return await self.get_cdt_cb_service_status_with_options_async(request, runtime)
def open_cdt_cb_service_with_options(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtCbServiceResponse(),
self.do_rpcrequest('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
async def open_cdt_cb_service_with_options_async(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
UtilClient.validate_model(request)
req = open_api_models.OpenApiRequest(
body=UtilClient.to_map(request)
)
return TeaCore.from_map(
cdt20210813_models.OpenCdtCbServiceResponse(),
await self.do_rpcrequest_async('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
)
def open_cdt_cb_service(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return self.open_cdt_cb_service_with_options(request, runtime)
async def open_cdt_cb_service_async(
self,
request: cdt20210813_models.OpenCdtCbServiceRequest,
) -> cdt20210813_models.OpenCdtCbServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdt_cb_service_with_options_async(request, runtime)
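
# A minimal usage sketch (not part of the generated client). The credential
# values are placeholders, and the Config field names are assumptions that
# follow the usual alibabacloud_tea_openapi conventions.
if __name__ == '__main__':
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        region_id='cn-hangzhou',
    )
    client = Client(config)
    request = cdt20210813_models.GetCdtServiceStatusRequest()
    response = client.get_cdt_service_status(request)
    print(response.body)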
|
normal
|
{
"blob_id": "2e5d66033c2a049ba2423d01792a629bf4b8176d",
"index": 8728,
"step-1": "<mask token>\n\n\nclass Client(OpenApiClient):\n <mask token>\n <mask token>\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n <mask token>\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n <mask token>\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) 
->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-2": "<mask token>\n\n\nclass Client(OpenApiClient):\n <mask token>\n\n def __init__(self, config: open_api_models.Config):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self.\n _endpoint_rule, self._network, self._suffix, self._endpoint_map,\n self._endpoint)\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_service_status(self, request: cdt20210813_models.\n GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n <mask token>\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) 
->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-3": "<mask token>\n\n\nclass Client(OpenApiClient):\n <mask token>\n\n def __init__(self, config: open_api_models.Config):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self.\n _endpoint_rule, self._network, self._suffix, self._endpoint_map,\n self._endpoint)\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_service_status(self, request: cdt20210813_models.\n GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_service_with_options(self, request: cdt20210813_models.\n OpenCdtServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS',\n 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, 
runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-4": "<mask token>\n\n\nclass Client(OpenApiClient):\n \"\"\"\n * \"\"\"\n\n def __init__(self, config: open_api_models.Config):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self.\n _endpoint_rule, self._network, self._suffix, self._endpoint_map,\n self._endpoint)\n\n def get_endpoint(self, product_id: str, region_id: str, endpoint_rule:\n str, network: str, suffix: str, endpoint_map: Dict[str, str],\n endpoint: str) ->str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(\n endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id,\n endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest, runtime: util_models\n .RuntimeOptions) ->cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtServiceStatusResponse(), await self.do_rpcrequest_async(\n 'GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_service_status(self, request: cdt20210813_models.\n GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(self, request:\n cdt20210813_models.GetCdtServiceStatusRequest\n ) ->cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_service_with_options(self, request: cdt20210813_models.\n OpenCdtServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS',\n 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_service(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, 
runtime)\n\n async def open_cdt_service_async(self, request: cdt20210813_models.\n OpenCdtServiceRequest) ->cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), self.do_rpcrequest(\n 'GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n async def get_cdt_cb_service_status_with_options_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest, runtime:\n util_models.RuntimeOptions\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.\n GetCdtCbServiceStatusResponse(), await self.do_rpcrequest_async\n ('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK',\n 'json', req, runtime))\n\n def get_cdt_cb_service_status(self, request: cdt20210813_models.\n GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(self, request:\n cdt20210813_models.GetCdtCbServiceStatusRequest\n ) ->cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request,\n runtime)\n\n def open_cdt_cb_service_with_options(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest, runtime: util_models.RuntimeOptions\n ) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), self.do_rpcrequest('OpenCdtCbService', '2021-08-13',\n 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n async def open_cdt_cb_service_with_options_async(self, request:\n cdt20210813_models.OpenCdtCbServiceRequest, runtime: util_models.\n RuntimeOptions) ->cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(body=UtilClient.to_map(request))\n return TeaCore.from_map(cdt20210813_models.OpenCdtCbServiceResponse\n (), await self.do_rpcrequest_async('OpenCdtCbService',\n '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime))\n\n def open_cdt_cb_service(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(self, request: cdt20210813_models.\n OpenCdtCbServiceRequest) ->cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request,\n runtime)\n",
"step-5": "# -*- coding: utf-8 -*-\n# This file is auto-generated, don't edit it. Thanks.\nfrom typing import Dict\nfrom Tea.core import TeaCore\n\nfrom alibabacloud_tea_openapi.client import Client as OpenApiClient\nfrom alibabacloud_tea_openapi import models as open_api_models\nfrom alibabacloud_tea_util.client import Client as UtilClient\nfrom alibabacloud_endpoint_util.client import Client as EndpointUtilClient\nfrom alibabacloud_cdt20210813 import models as cdt20210813_models\nfrom alibabacloud_tea_util import models as util_models\n\n\nclass Client(OpenApiClient):\n \"\"\"\n *\\\n \"\"\"\n def __init__(\n self, \n config: open_api_models.Config,\n ):\n super().__init__(config)\n self._endpoint_rule = ''\n self.check_config(config)\n self._endpoint = self.get_endpoint('cdt', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)\n\n def get_endpoint(\n self,\n product_id: str,\n region_id: str,\n endpoint_rule: str,\n network: str,\n suffix: str,\n endpoint_map: Dict[str, str],\n endpoint: str,\n ) -> str:\n if not UtilClient.empty(endpoint):\n return endpoint\n if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):\n return endpoint_map.get(region_id)\n return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)\n\n def get_cdt_service_status_with_options(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtServiceStatusResponse(),\n self.do_rpcrequest('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def get_cdt_service_status_with_options_async(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtServiceStatusResponse(),\n await self.do_rpcrequest_async('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def get_cdt_service_status(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_service_status_with_options(request, runtime)\n\n async def get_cdt_service_status_async(\n self,\n request: cdt20210813_models.GetCdtServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_service_status_with_options_async(request, runtime)\n\n def open_cdt_service_with_options(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtServiceResponse(),\n self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def open_cdt_service_with_options_async(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n runtime: 
util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def open_cdt_service(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_service_with_options(request, runtime)\n\n async def open_cdt_service_async(\n self,\n request: cdt20210813_models.OpenCdtServiceRequest,\n ) -> cdt20210813_models.OpenCdtServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_service_with_options_async(request, runtime)\n\n def get_cdt_cb_service_status_with_options(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtCbServiceStatusResponse(),\n self.do_rpcrequest('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def get_cdt_cb_service_status_with_options_async(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.GetCdtCbServiceStatusResponse(),\n await self.do_rpcrequest_async('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def get_cdt_cb_service_status(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return self.get_cdt_cb_service_status_with_options(request, runtime)\n\n async def get_cdt_cb_service_status_async(\n self,\n request: cdt20210813_models.GetCdtCbServiceStatusRequest,\n ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:\n runtime = util_models.RuntimeOptions()\n return await self.get_cdt_cb_service_status_with_options_async(request, runtime)\n\n def open_cdt_cb_service_with_options(\n self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtCbServiceResponse(),\n self.do_rpcrequest('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n async def open_cdt_cb_service_with_options_async(\n self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n UtilClient.validate_model(request)\n req = open_api_models.OpenApiRequest(\n body=UtilClient.to_map(request)\n )\n return TeaCore.from_map(\n cdt20210813_models.OpenCdtCbServiceResponse(),\n await self.do_rpcrequest_async('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)\n )\n\n def open_cdt_cb_service(\n 
self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return self.open_cdt_cb_service_with_options(request, runtime)\n\n async def open_cdt_cb_service_async(\n self,\n request: cdt20210813_models.OpenCdtCbServiceRequest,\n ) -> cdt20210813_models.OpenCdtCbServiceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.open_cdt_cb_service_with_options_async(request, runtime)\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
database.read_data()
<|reserved_special_token_0|>
print(prices.shape)
<|reserved_special_token_0|>
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50, None, None])[:, 0])
for i in range(len(prices) - 50):
preds.append(model.forward_step(preds[-1][None, ...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices) - 1), prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
database = StockDatabase()
database.read_data()
prices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',
length=2000)))
print(prices.shape)
model = RecurrentAnalyzer(100, 10).to('cpu')
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50, None, None])[:, 0])
for i in range(len(prices) - 50):
preds.append(model.forward_step(preds[-1][None, ...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices) - 1), prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
<|reserved_special_token_1|>
from StockDatabase import StockDatabase
from RNNinner import RecurrentAnalyzer
import torch
import matplotlib.pyplot as plt
import numpy as np
database = StockDatabase()
database.read_data()
prices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',
length=2000)))
print(prices.shape)
model = RecurrentAnalyzer(100, 10).to('cpu')
model.load_state_dict(torch.load('rnn_inner'))
model.init_hidden()
model.eval()
with torch.no_grad():
preds = list(model(prices[:50, None, None])[:, 0])
for i in range(len(prices) - 50):
preds.append(model.forward_step(preds[-1][None, ...])[0])
print(preds)
print(prices[1:])
plt.plot(np.arange(len(prices) - 1), prices[1:])
plt.plot(np.arange(len(preds)), preds)
plt.show()
|
flexible
|
{
"blob_id": "8abfb6a9ca3a7a909a1e8125e8c03e29b2bacda8",
"index": 109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndatabase.read_data()\n<mask token>\nprint(prices.shape)\n<mask token>\nmodel.load_state_dict(torch.load('rnn_inner'))\nmodel.init_hidden()\nmodel.eval()\nwith torch.no_grad():\n preds = list(model(prices[:50, None, None])[:, 0])\n for i in range(len(prices) - 50):\n preds.append(model.forward_step(preds[-1][None, ...])[0])\n print(preds)\n print(prices[1:])\n plt.plot(np.arange(len(prices) - 1), prices[1:])\n plt.plot(np.arange(len(preds)), preds)\n plt.show()\n",
"step-3": "<mask token>\ndatabase = StockDatabase()\ndatabase.read_data()\nprices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',\n length=2000)))\nprint(prices.shape)\nmodel = RecurrentAnalyzer(100, 10).to('cpu')\nmodel.load_state_dict(torch.load('rnn_inner'))\nmodel.init_hidden()\nmodel.eval()\nwith torch.no_grad():\n preds = list(model(prices[:50, None, None])[:, 0])\n for i in range(len(prices) - 50):\n preds.append(model.forward_step(preds[-1][None, ...])[0])\n print(preds)\n print(prices[1:])\n plt.plot(np.arange(len(prices) - 1), prices[1:])\n plt.plot(np.arange(len(preds)), preds)\n plt.show()\n",
"step-4": "from StockDatabase import StockDatabase\nfrom RNNinner import RecurrentAnalyzer\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\ndatabase = StockDatabase()\ndatabase.read_data()\nprices = torch.tensor(database.normalize(database.get_stock_prices('AAPL',\n length=2000)))\nprint(prices.shape)\nmodel = RecurrentAnalyzer(100, 10).to('cpu')\nmodel.load_state_dict(torch.load('rnn_inner'))\nmodel.init_hidden()\nmodel.eval()\nwith torch.no_grad():\n preds = list(model(prices[:50, None, None])[:, 0])\n for i in range(len(prices) - 50):\n preds.append(model.forward_step(preds[-1][None, ...])[0])\n print(preds)\n print(prices[1:])\n plt.plot(np.arange(len(prices) - 1), prices[1:])\n plt.plot(np.arange(len(preds)), preds)\n plt.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
choices = ['X', 'O']
try:
# Choice of X-O given to the player
player_sym = input("Choose 'X' or 'O' : ")
# raising an exception if the variable is not X or O
if player_sym!='X' and player_sym!='O':
raise Exception("Symbol not found")
except Exception as e:
print(e.args)
else:
# Allotting the other one as the computer symbol
choices.remove(player_sym)
comp_sym = choices[0]
player_dict = {player_sym:'Player', comp_sym:'Computer'}
# creating the board
board = [' ']*9
gameEnd = False # to track when the game ends
unmarked = [i for i in range(9)] # to track all the blank boxes left
# gameOver function check if the game already has a winner
def gameOver(board, symbol):
        # below is the sequence of all the possible winning combinations:
        # the three columns, the three rows, and the two diagonals
        wins = [(0, 3, 6), (1, 4, 7), (2, 5, 8),
                (0, 1, 2), (3, 4, 5), (6, 7, 8),
                (0, 4, 8), (2, 4, 6)]
        if any(board[a] == board[b] == board[c] == symbol for a, b, c in wins):
            # if there is a pattern match the game is over hence return True
            return True
# function for marking the box with the symbol
def mark(pos, symbol):
board[pos] = symbol
unmarked.remove(pos)
# Used it for debugging : print(f"Unmarked : {unmarked}")
# function to display the board at a particular time
def displayBoard():
for i in range(len(board)):
# formatting the output for the middle elements
if i==1 or i==4 or i==7:
print(f'|{board[i]}|', end=' ')
elif i==2 or i==5:
print(f'{board[i]}\n--------') # marks the end of a line and hence bifurcates two lines
else:
print(f'{board[i]}', end=' ')
if __name__== "__main__":
# this is where the game starts
while not gameEnd: # loop until game ends
try:
player_pos = int(input("\n\nWhere would you mark? "))
# check if position index is on the board and is available for marking else raise Exception
if player_pos<0 or player_pos>8 or (player_pos not in unmarked):
raise Exception("Position out of Board")
except Exception as e:
print(e.args)
else:
mark(player_pos, player_sym)
# check if the game has already ended and if yes, declare the player as winner
if gameOver(board, player_sym):
displayBoard()
print("\n\nPlayer Won!!!")
break
            # if every square is filled after the player's move, the game is a draw
            if not unmarked:
                displayBoard()
                print("\n\nIt's a draw!")
                break
            # computer will mark on some random square that is not marked yet
            comp_pos = unmarked[random.randint(0, len(unmarked)-1)]
            mark(comp_pos, comp_sym)
# check if the game has already ended and if yes, declare the computer as winner
if gameOver(board, comp_sym):
displayBoard()
print("\n\nComputer WON!!!")
break
# display the board after each iteration
displayBoard()
# marks the end of the game
print("GAME OVER")
|
normal
|
{
"blob_id": "d2f6d7c779d3d6e61d9da7af01a2931fdabec828",
"index": 371,
"step-1": "<mask token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\n<mask token>\n",
"step-2": "<mask token>\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\n<mask token>\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n",
"step-3": "<mask token>\nchoices = ['X', 'O']\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\nboard = [' '] * 9\ngameEnd = False\nunmarked = [i for i in range(9)]\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n",
"step-4": "import random\nchoices = ['X', 'O']\ntry:\n player_sym = input(\"Choose 'X' or 'O' : \")\n if player_sym != 'X' and player_sym != 'O':\n raise Exception('Symbol not found')\nexcept Exception as e:\n print(e.args)\nelse:\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym: 'Player', comp_sym: 'Computer'}\nboard = [' '] * 9\ngameEnd = False\nunmarked = [i for i in range(9)]\n\n\ndef gameOver(board, symbol):\n if board[0] == board[3] == board[6] == symbol or board[1] == board[7\n ] == board[4] == symbol or board[2] == board[5] == board[8\n ] == symbol or board[0] == board[1] == board[2] == symbol or board[5\n ] == board[3] == board[4] == symbol or board[6] == board[7] == board[8\n ] == symbol or board[2] == board[4] == board[6] == symbol or board[0\n ] == board[4] == board[8] == symbol:\n return True\n\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n\n\ndef displayBoard():\n for i in range(len(board)):\n if i == 1 or i == 4 or i == 7:\n print(f'|{board[i]}|', end=' ')\n elif i == 2 or i == 5:\n print(f'{board[i]}\\n--------')\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__ == '__main__':\n while not gameEnd:\n try:\n player_pos = int(input('\\n\\nWhere would you mark? '))\n if player_pos < 0 or player_pos > 8 or player_pos not in unmarked:\n raise Exception('Position out of Board')\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n if gameOver(board, player_sym):\n displayBoard()\n print('\\n\\nPlayer Won!!!')\n break\n comp_pos = unmarked[random.randint(0, len(unmarked) - 1)]\n mark(comp_pos, comp_sym)\n if gameOver(board, comp_sym):\n displayBoard()\n print('\\n\\nComputer WON!!!')\n break\n displayBoard()\n print('GAME OVER')\n",
"step-5": "import random\n\nchoices = ['X', 'O']\ntry:\n# Choice of X-O given to the player\n player_sym = input(\"Choose 'X' or 'O' : \")\n# raising an exception if the variable is not X or O\n if player_sym!='X' and player_sym!='O':\n raise Exception(\"Symbol not found\")\nexcept Exception as e:\n print(e.args)\nelse:\n# Allotting the other one as the computer symbol\n choices.remove(player_sym)\n comp_sym = choices[0]\n player_dict = {player_sym:'Player', comp_sym:'Computer'}\n \n# creating the board\nboard = [' ']*9\ngameEnd = False # to track when the game ends\nunmarked = [i for i in range(9)] # to track all the blank boxes left\n\n\n\n# gameOver function check if the game already has a winner\ndef gameOver(board, symbol):\n# below is the sequence of all the possible winning combinations \n if board[0]==board[3]==board[6]==symbol or board[1]==board[7]==board[4]==symbol or board[2]==board[5]==board[8]==symbol or board[0]==board[1]==board[2]==symbol or board[5]==board[3]==board[4]==symbol or board[6]==board[7]==board[8]==symbol or board[2]==board[4]==board[6]==symbol or board[0]==board[4]==board[8]==symbol:\n# if there is a pattern match the game is over hence return True\n return True\n\n\n\n# function for marking the box with the symbol\n\ndef mark(pos, symbol):\n board[pos] = symbol\n unmarked.remove(pos)\n# Used it for debugging : print(f\"Unmarked : {unmarked}\")\n\n\n\n# function to display the board at a particular time\ndef displayBoard():\n for i in range(len(board)):\n# formatting the output for the middle elements\n if i==1 or i==4 or i==7:\n print(f'|{board[i]}|', end=' ')\n elif i==2 or i==5:\n print(f'{board[i]}\\n--------') # marks the end of a line and hence bifurcates two lines\n else:\n print(f'{board[i]}', end=' ')\n\n\nif __name__== \"__main__\":\n # this is where the game starts \n while not gameEnd: # loop until game ends\n try:\n player_pos = int(input(\"\\n\\nWhere would you mark? \"))\n # check if position index is on the board and is available for marking else raise Exception\n if player_pos<0 or player_pos>8 or (player_pos not in unmarked): \n raise Exception(\"Position out of Board\")\n break\n except Exception as e:\n print(e.args)\n else:\n mark(player_pos, player_sym)\n \n # check if the game has already ended and if yes, declare the player as winner\n if gameOver(board, player_sym): \n displayBoard()\n print(\"\\n\\nPlayer Won!!!\")\n break\n \n # computer will mark on some random square that is not marked yet\n comp_pos = unmarked[random.randint(0, len(unmarked)-1)]\n mark(comp_pos, comp_sym)\n \n # check if the game has already ended and if yes, declare the computer as winner\n if gameOver(board, comp_sym): \n displayBoard()\n print(\"\\n\\nComputer WON!!!\")\n break\n \n # display the board after each iteration\n displayBoard()\n \n # marks the end of the game\n print(\"GAME OVER\")",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(T):
start, end = map(int, input().split())
between = end - start
flag = 0
num = 1
while between > 0:
if flag % 2 == 1:
between -= num
num += 1
flag += 1
else:
between -= num
flag += 1
print(flag)
<|reserved_special_token_1|>
T = int(input())
for i in range(T):
start, end = map(int, input().split())
between = end - start
flag = 0
num = 1
while between > 0:
if flag % 2 == 1:
between -= num
num += 1
flag += 1
else:
between -= num
flag += 1
print(flag)
|
flexible
|
{
"blob_id": "a96761fc483c0883b058c2b045b038522c23d426",
"index": 3441,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(T):\n start, end = map(int, input().split())\n between = end - start\n flag = 0\n num = 1\n while between > 0:\n if flag % 2 == 1:\n between -= num\n num += 1\n flag += 1\n else:\n between -= num\n flag += 1\n print(flag)\n",
"step-3": "T = int(input())\nfor i in range(T):\n start, end = map(int, input().split())\n between = end - start\n flag = 0\n num = 1\n while between > 0:\n if flag % 2 == 1:\n between -= num\n num += 1\n flag += 1\n else:\n between -= num\n flag += 1\n print(flag)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.db import models
# Create your models here.
class Logins(models.Model):
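    # one row per recorded login: timestamp, source IP address, and hostname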
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
|
normal
|
{
"blob_id": "9a55ccf758b4b2cc440153ab3b1f97823863a848",
"index": 165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Logins(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-4": "from django.db import models\n\n\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ZhilianSpider(RedisCrawlSpider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def start_requests(self):
url = (
'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
)
yield Request(url, headers=self.headers)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZhilianSpider(RedisCrawlSpider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def start_requests(self):
url = (
'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
)
yield Request(url, headers=self.headers)
def parse_zhilian(self, response):
_ = self
item = ZhilianSpiderItem()
item['job_id'] = response.url
item['job_name'] = response.xpath(
'/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()
item['job_company'] = response.xpath(
'/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()
item['job_salary'] = response.xpath(
'/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(
).strip()
item['job_education'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())
item['job_address'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()
).strip()
item['job_category'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())
item['job_description'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').
extract()).replace(',', ',').replace('\r\n', '').strip()
if not item['job_description']:
item['job_description'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').
extract()).replace(',', ',').replace('\r\n', '').strip()
text = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(
'string(.)').extract()).replace(',', ',').replace('\r\n', ''
).strip()
if text:
item['company_profile'] = text
if item['company_profile'] == '':
item['company_profile'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').
extract()).replace(',', ',').replace('\r\n', '').strip()
else:
item['company_profile'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').
extract()).replace(',', ',').replace('\r\n', '').strip()
yield item
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZhilianSpider(RedisCrawlSpider):
name = 'zhilianspider'
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
rules = [Rule(LinkExtractor(restrict_xpaths=
'/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'
), follow=True), Rule(LinkExtractor(allow=
'http://jobs.zhaopin.com/(\\d.+).htm'), callback='parse_zhilian')]
def start_requests(self):
url = (
'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
)
yield Request(url, headers=self.headers)
def parse_zhilian(self, response):
_ = self
item = ZhilianSpiderItem()
item['job_id'] = response.url
item['job_name'] = response.xpath(
'/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()
item['job_company'] = response.xpath(
'/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()
item['job_salary'] = response.xpath(
'/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(
).strip()
item['job_education'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())
item['job_address'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()
).strip()
item['job_category'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())
item['job_description'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').
extract()).replace(',', ',').replace('\r\n', '').strip()
if not item['job_description']:
item['job_description'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').
extract()).replace(',', ',').replace('\r\n', '').strip()
text = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(
'string(.)').extract()).replace(',', ',').replace('\r\n', ''
).strip()
if text:
item['company_profile'] = text
if item['company_profile'] == '':
item['company_profile'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').
extract()).replace(',', ',').replace('\r\n', '').strip()
else:
item['company_profile'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').
extract()).replace(',', ',').replace('\r\n', '').strip()
yield item
<|reserved_special_token_1|>
from scrapy import Request
from ..items import ZhilianSpiderItem
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
class ZhilianSpider(RedisCrawlSpider):
name = 'zhilianspider'
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
rules = [Rule(LinkExtractor(restrict_xpaths=
'/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'
), follow=True), Rule(LinkExtractor(allow=
'http://jobs.zhaopin.com/(\\d.+).htm'), callback='parse_zhilian')]
def start_requests(self):
url = (
'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
)
yield Request(url, headers=self.headers)
def parse_zhilian(self, response):
_ = self
item = ZhilianSpiderItem()
item['job_id'] = response.url
item['job_name'] = response.xpath(
'/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()
item['job_company'] = response.xpath(
'/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()
item['job_salary'] = response.xpath(
'/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(
).strip()
item['job_education'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())
item['job_address'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()
).strip()
item['job_category'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())
item['job_description'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').
extract()).replace(',', ',').replace('\r\n', '').strip()
if not item['job_description']:
item['job_description'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').
extract()).replace(',', ',').replace('\r\n', '').strip()
text = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(
'string(.)').extract()).replace(',', ',').replace('\r\n', ''
).strip()
if text:
item['company_profile'] = text
if item['company_profile'] == '':
item['company_profile'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').
extract()).replace(',', ',').replace('\r\n', '').strip()
else:
item['company_profile'] = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').
extract()).replace(',', ',').replace('\r\n', '').strip()
yield item
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from scrapy import Request
from ..items import ZhilianSpiderItem
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider
class ZhilianSpider(RedisCrawlSpider):
name = 'zhilianspider'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
rules = [
Rule(LinkExtractor(restrict_xpaths='/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'), follow=True),
Rule(LinkExtractor(allow=r'http://jobs.zhaopin.com/(\d.+).htm'), callback='parse_zhilian')
]
def start_requests(self):
url = 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'
yield Request(url, headers=self.headers)
def parse_zhilian(self, response):
_ = self
item = ZhilianSpiderItem()
item['job_id'] = response.url
item['job_name'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()
item['job_company'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()
item['job_salary'] = response.xpath('/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first().strip()
item['job_education'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())
item['job_address'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()).strip()
item['job_category'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())
item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
if not item['job_description']:
item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
text = ''.join(response.xpath(
'/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath('string(.)').extract()).replace(',', ',').replace('\r\n', '').strip()
if text:
item['company_profile'] = text
if item['company_profile'] == '':
item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').extract()).replace(',', ',').replace('\r\n', '').strip()
else:
item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').extract()).replace(',', ',').replace('\r\n', '').strip()
yield item
|
flexible
|
{
"blob_id": "894fa01e16d200add20f614fd4a5ee9071777db9",
"index": 3339,
"step-1": "<mask token>\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n item['job_id'] = response.url\n item['job_name'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n item['job_company'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n item['job_salary'] = response.xpath(\n '/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(\n ).strip()\n item['job_education'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n item['job_address'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()\n ).strip()\n item['job_category'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(\n 'string(.)').extract()).replace(',', ',').replace('\\r\\n', ''\n ).strip()\n if text:\n item['company_profile'] = text\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n yield item\n",
"step-3": "<mask token>\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n name = 'zhilianspider'\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n }\n rules = [Rule(LinkExtractor(restrict_xpaths=\n '/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'\n ), follow=True), Rule(LinkExtractor(allow=\n 'http://jobs.zhaopin.com/(\\\\d.+).htm'), callback='parse_zhilian')]\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n item['job_id'] = response.url\n item['job_name'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n item['job_company'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n item['job_salary'] = response.xpath(\n '/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(\n ).strip()\n item['job_education'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n item['job_address'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()\n ).strip()\n item['job_category'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(\n 'string(.)').extract()).replace(',', ',').replace('\\r\\n', ''\n ).strip()\n if text:\n item['company_profile'] = text\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n yield item\n",
"step-4": "from scrapy import Request\nfrom ..items import ZhilianSpiderItem\nfrom scrapy.spiders import Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n name = 'zhilianspider'\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n }\n rules = [Rule(LinkExtractor(restrict_xpaths=\n '/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'\n ), follow=True), Rule(LinkExtractor(allow=\n 'http://jobs.zhaopin.com/(\\\\d.+).htm'), callback='parse_zhilian')]\n\n def start_requests(self):\n url = (\n 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n )\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n item['job_id'] = response.url\n item['job_name'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n item['job_company'] = response.xpath(\n '/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n item['job_salary'] = response.xpath(\n '/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first(\n ).strip()\n item['job_education'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n item['job_address'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()\n ).strip()\n item['job_category'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath(\n 'string(.)').extract()).replace(',', ',').replace('\\r\\n', ''\n ).strip()\n if text:\n item['company_profile'] = text\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').\n extract()).replace(',', ',').replace('\\r\\n', '').strip()\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom scrapy import Request\nfrom ..items import ZhilianSpiderItem\nfrom scrapy.spiders import Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy_redis.spiders import RedisCrawlSpider\n\n\nclass ZhilianSpider(RedisCrawlSpider):\n name = 'zhilianspider'\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'\n }\n\n rules = [\n Rule(LinkExtractor(restrict_xpaths='/html/body/div[3]/div[3]/div[3]/form/div[1]/div[1]/div[3]/ul/li[11]/a'), follow=True),\n Rule(LinkExtractor(allow=r'http://jobs.zhaopin.com/(\\d.+).htm'), callback='parse_zhilian')\n ]\n\n def start_requests(self):\n url = 'https://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E4%B8%8A%E6%B5%B7%2B%E5%8C%97%E4%BA%AC%2B%E5%B9%BF%E5%B7%9E%2B%E6%B7%B1%E5%9C%B3&kw=%E4%BC%9A%E8%AE%A1'\n yield Request(url, headers=self.headers)\n\n def parse_zhilian(self, response):\n _ = self\n item = ZhilianSpiderItem()\n\n item['job_id'] = response.url\n\n item['job_name'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h1/text()').extract_first()\n\n item['job_company'] = response.xpath('/html/body/div[5]/div[1]/div[1]/h2/a/text()').extract_first()\n\n item['job_salary'] = response.xpath('/html/body/div[6]/div[1]/ul/li[1]/strong/text()').extract_first().strip()\n\n item['job_education'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[6]/strong/text()').extract())\n\n item['job_address'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[1]/h2/text()').extract()).strip()\n\n item['job_category'] = ''.join(response.xpath('/html/body/div[6]/div[1]/ul/li[8]/strong/a/text()').extract())\n\n item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div//p').xpath('string(.)').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n if not item['job_description']:\n item['job_description'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div').xpath('string(.)').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n\n text = ''.join(response.xpath(\n '/html/body/div[6]/div[1]/div[1]/div/div[2]//p').xpath('string(.)').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n\n if text:\n item['company_profile'] = text\n\n if item['company_profile'] == '':\n item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/text()').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n else:\n item['company_profile'] = ''.join(response.xpath('/html/body/div[6]/div[1]/div[1]/div/div[2]/div/text()').extract()).replace(',', ',').replace('\\r\\n', '').strip()\n\n yield item\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from bs4 import BeautifulSoup, CData
import requests, sys, csv, json, os, urllib.request, re
url2 = "http://ufm.edu/Estudios"
def estudios(Minisoup):
print("2.Estudios")
#now navigate to /Estudios (better if you obtain href from the DOM)
try:
html_content = requests.get(url2).text
except requests.RequestException:
print(f"unable to get {url2}")
sys.exit(1)
soup = BeautifulSoup(html_content, "html.parser")
#display all items from "topmenu" (8 in total)
print("Display all items from topmenu:")
b = 0
tabla = soup.find("div", { "id" : "topmenu" })
for datos in tabla.findAll("li"):
# for datos in tabla.findAll("a",{"class":"external text"}):
celda = datos.text
b += 1
print(b,"<",celda,">")
print("-------------------------------------------------------------------------------------------------------")
#display ALL "Estudios" (Doctorados/Maestrias/Posgrados/Licenciaturas/Baccalaureus)
print("Display all Estudios:")
tablas1 = soup.find("div",{"id":"mw-content-text"})
for datos in tablas1.findAll("div",{"class":"estudios"}):
celdas = datos.text
print("-",celdas)
print("-------------------------------------------------------------------------------------------------------")
#display from "leftbar" all <li> items (4 in total)
print("Display from leftbar all <li> items:")
c=0
tablas2 = soup.find("div",{"class":"leftbar"})
for datos in tablas2.findAll("li"):
#for datos in tablas2.findAll("a",{"class":"external text"}):
celdas2 = datos.text
c += 1
#print(celdas2)
print(c,"<",celdas2,">")
print("-------------------------------------------------------------------------------------------------------")
#get and display all available social media with its links (href) "class=social pull-right"
print("Get and display all available social media with its links (href) class =social pull -right:")
tablas3 = soup.find("div",{"class":"social pull-right"})
for datos in tablas3.findAll('a'):
celdas3 = datos.get('href')
print("-<",celdas3,">")
print("-------------------------------------------------------------------------------------------------------")
#count all <a> (just display the count)
d=0
for datos in soup.find_all('a'):
d += 1
print("count all <a: <",d,">")
print("-------------------------------------------------------------------------------------------------------")
print("=======================================================================================================")
|
normal
|
{
"blob_id": "846682072a125c76fc9ffa011109abce7c3bb5d7",
"index": 3269,
"step-1": "<mask token>\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f'unable to get {url2}')\n sys.exit(1)\n<mask token>\nprint('Display all items from topmenu:')\n<mask token>\nfor datos in tabla.findAll('li'):\n celda = datos.text\n b += 1\n print(b, '<', celda, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display all Estudios:')\n<mask token>\nfor datos in tablas1.findAll('div', {'class': 'estudios'}):\n celdas = datos.text\n print('-', celdas)\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display from leftbar all <li> items:')\n<mask token>\nfor datos in tablas2.findAll('li'):\n celdas2 = datos.text\n c += 1\n print(c, '<', celdas2, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n 'Get and display all available social media with its links (href) class =social pull -right:'\n )\n<mask token>\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print('-<', celdas3, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\n<mask token>\nfor datos in soup.find_all('a'):\n d += 1\nprint('count all <a: <', d, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n '======================================================================================================='\n )\n",
"step-3": "<mask token>\nurl2 = 'http://ufm.edu/Estudios'\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f'unable to get {url2}')\n sys.exit(1)\nsoup = BeautifulSoup(html_content, 'html.parser')\nprint('Display all items from topmenu:')\nb = 0\ntabla = soup.find('div', {'id': 'topmenu'})\nfor datos in tabla.findAll('li'):\n celda = datos.text\n b += 1\n print(b, '<', celda, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display all Estudios:')\ntablas1 = soup.find('div', {'id': 'mw-content-text'})\nfor datos in tablas1.findAll('div', {'class': 'estudios'}):\n celdas = datos.text\n print('-', celdas)\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display from leftbar all <li> items:')\nc = 0\ntablas2 = soup.find('div', {'class': 'leftbar'})\nfor datos in tablas2.findAll('li'):\n celdas2 = datos.text\n c += 1\n print(c, '<', celdas2, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n 'Get and display all available social media with its links (href) class =social pull -right:'\n )\ntablas3 = soup.find('div', {'class': 'social pull-right'})\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print('-<', celdas3, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nd = 0\nfor datos in soup.find_all('a'):\n d += 1\nprint('count all <a: <', d, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n '======================================================================================================='\n )\n",
"step-4": "from bs4 import BeautifulSoup, CData\nimport requests, sys, csv, json, os, urllib.request, re\nimport json\nurl2 = 'http://ufm.edu/Estudios'\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f'unable to get {url2}')\n sys.exit(1)\nsoup = BeautifulSoup(html_content, 'html.parser')\nprint('Display all items from topmenu:')\nb = 0\ntabla = soup.find('div', {'id': 'topmenu'})\nfor datos in tabla.findAll('li'):\n celda = datos.text\n b += 1\n print(b, '<', celda, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display all Estudios:')\ntablas1 = soup.find('div', {'id': 'mw-content-text'})\nfor datos in tablas1.findAll('div', {'class': 'estudios'}):\n celdas = datos.text\n print('-', celdas)\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display from leftbar all <li> items:')\nc = 0\ntablas2 = soup.find('div', {'class': 'leftbar'})\nfor datos in tablas2.findAll('li'):\n celdas2 = datos.text\n c += 1\n print(c, '<', celdas2, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n 'Get and display all available social media with its links (href) class =social pull -right:'\n )\ntablas3 = soup.find('div', {'class': 'social pull-right'})\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print('-<', celdas3, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nd = 0\nfor datos in soup.find_all('a'):\n d += 1\nprint('count all <a: <', d, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n '======================================================================================================='\n )\n",
"step-5": "from bs4 import BeautifulSoup, CData\nimport requests,sys,csv,json,os, urllib.request, re\nimport json\n\n\nurl2 = \"http://ufm.edu/Estudios\"\ndef estudios(Minisoup):\n print(\"2.Estudios\")\n\n#now navigate to /Estudios (better if you obtain href from the DOM)\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f\"unable to get {url2}\")\n sys.exit(1)\n\nsoup = BeautifulSoup(html_content, \"html.parser\")\n\n#display all items from \"topmenu\" (8 in total)\nprint(\"Display all items from topmenu:\")\nb = 0\ntabla = soup.find(\"div\", { \"id\" : \"topmenu\" })\nfor datos in tabla.findAll(\"li\"):\n# for datos in tabla.findAll(\"a\",{\"class\":\"external text\"}):\n celda = datos.text\n b += 1\n print(b,\"<\",celda,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#display ALL \"Estudios\" (Doctorados/Maestrias/Posgrados/Licenciaturas/Baccalaureus)\nprint(\"Display all Estudios:\")\ntablas1 = soup.find(\"div\",{\"id\":\"mw-content-text\"})\nfor datos in tablas1.findAll(\"div\",{\"class\":\"estudios\"}):\n celdas = datos.text\n print(\"-\",celdas)\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#display from \"leftbar\" all <li> items (4 in total)\nprint(\"Display from leftbar all <li> items:\")\nc=0\ntablas2 = soup.find(\"div\",{\"class\":\"leftbar\"})\nfor datos in tablas2.findAll(\"li\"):\n#for datos in tablas2.findAll(\"a\",{\"class\":\"external text\"}):\n celdas2 = datos.text\n c += 1\n #print(celdas2) \n print(c,\"<\",celdas2,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#get and display all available social media with its links (href) \"class=social pull-right\"\nprint(\"Get and display all available social media with its links (href) class =social pull -right:\")\ntablas3 = soup.find(\"div\",{\"class\":\"social pull-right\"})\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print(\"-<\",celdas3,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#count all <a> (just display the count)\nd=0\nfor datos in soup.find_all('a'):\n d += 1\nprint(\"count all <a: <\",d,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\nprint(\"=======================================================================================================\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def foo(x, y=5):
def bar(x):
return x + 1
return bar(y * 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def foo(x, y=5):
def bar(x):
return x + 1
return bar(y * 2)
print(foo(3))
|
flexible
|
{
"blob_id": "80d1979c5767d0ff90f464651c9d0ca6d65effb2",
"index": 6472,
"step-1": "<mask token>\n",
"step-2": "def foo(x, y=5):\n\n def bar(x):\n return x + 1\n return bar(y * 2)\n\n\n<mask token>\n",
"step-3": "def foo(x, y=5):\n\n def bar(x):\n return x + 1\n return bar(y * 2)\n\n\nprint(foo(3))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys
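
# builds an orbit tree from "A)B" lines on stdin (B directly orbits A)
# and prints the total number of direct and indirect orbits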
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
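
    # print one line per leaf: the chain of names from the root down to that leaf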
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
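
    # return start (this node's depth) plus the depths of all descendants;
    # COM.distance(0) is therefore the total direct + indirect orbit count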
def distance(self, start):
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
COM = Obj('COM')
orbits = {}
orbits['COM'] = COM
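# each input line has the form "A)B": object B directly orbits object A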
effects = [x.strip().split(')') for x in list(sys.stdin)]
for c, o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
|
normal
|
{
"blob_id": "7d3f4e0a5031f9ce618c568b440c7425489060a1",
"index": 4122,
"step-1": "<mask token>\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\n<mask token>\nfor c, o in effects:\n obj = None\n if o in orbits:\n obj = orbits[o]\n else:\n obj = Obj(o)\n orbits[o] = obj\n if c in orbits:\n orbits[c].add_child(obj)\n else:\n ctr = Obj(c)\n ctr.add_child(obj)\n orbits[c] = ctr\nprint(COM.distance(0))\n",
"step-3": "<mask token>\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\nCOM = Obj('COM')\norbits = {}\norbits['COM'] = COM\neffects = [x.strip().split(')') for x in list(sys.stdin)]\nfor c, o in effects:\n obj = None\n if o in orbits:\n obj = orbits[o]\n else:\n obj = Obj(o)\n orbits[o] = obj\n if c in orbits:\n orbits[c].add_child(obj)\n else:\n ctr = Obj(c)\n ctr.add_child(obj)\n orbits[c] = ctr\nprint(COM.distance(0))\n",
"step-4": "import sys\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\nCOM = Obj('COM')\norbits = {}\norbits['COM'] = COM\neffects = [x.strip().split(')') for x in list(sys.stdin)]\nfor c, o in effects:\n obj = None\n if o in orbits:\n obj = orbits[o]\n else:\n obj = Obj(o)\n orbits[o] = obj\n if c in orbits:\n orbits[c].add_child(obj)\n else:\n ctr = Obj(c)\n ctr.add_child(obj)\n orbits[c] = ctr\nprint(COM.distance(0))\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .hacker import HackerRegistrationPage
from .judge import JudgeRegistrationPage
from .mentor import MentorRegistrationPage
from .organizer import OrganizerRegistrationPage
from .user import UserRegistrationPage
|
flexible
|
{
"blob_id": "34f3212b0254cbcb5e1ca535a29d4fe820dcaad8",
"index": 2978,
"step-1": "<mask token>\n",
"step-2": "from .hacker import HackerRegistrationPage\nfrom .judge import JudgeRegistrationPage\nfrom .mentor import MentorRegistrationPage\nfrom .organizer import OrganizerRegistrationPage\nfrom .user import UserRegistrationPage\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def parse_args():
import argparse
import os.path
def isfile(path):
Error = argparse.ArgumentTypeError
if not os.path.exists(path):
raise Error("No such file: '{0}'".format(path))
elif not os.path.isfile(path):
raise Error("Not a valid file: '{0}'".format(path))
return path
hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)
parser.add_argument('filename', type=isfile, help='contribution data file')
parser.add_argument('-o', '--output', help='output file name')
parser.add_argument('-n', type=int, default=0, help=
'show n greatest contributions')
parser.add_argument('-s', '--stdev', action='store_true', help=
'only plot standard deviations')
parser.add_argument('-r', metavar='residue', nargs='+', help=
'plot specific residues along time')
return parser.parse_args()
def die(s):
print >> sys.stderr, 'ERROR:', s
exit(1)
def show_usage():
print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'
def read_contrib(fname):
data = []
with open(fname, 'rt') as f:
for line in f:
split = line.split()
k = split[0]
counts = [int(c) for c in split[2:]]
data.append((k, counts))
return data
def med(x):
x = sorted(x)
length = len(x)
if not length % 2:
return (x[length / 2] + x[(length - 1) / 2]) / 2.0
return float(x[length / 2])
def plot_sd(data):
x = numpy.array([(i + 1) for i in range(len(data[0]))])
d = numpy.std(data[1], axis=1)
pyplot.bar(x, d)
pyplot.xticks(x + 0.5, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.xlim((x[0] - 1, x[-1] + 1))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution standard deviations')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_args():
import argparse
import os.path
def isfile(path):
Error = argparse.ArgumentTypeError
if not os.path.exists(path):
raise Error("No such file: '{0}'".format(path))
elif not os.path.isfile(path):
raise Error("Not a valid file: '{0}'".format(path))
return path
hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)
parser.add_argument('filename', type=isfile, help='contribution data file')
parser.add_argument('-o', '--output', help='output file name')
parser.add_argument('-n', type=int, default=0, help=
'show n greatest contributions')
parser.add_argument('-s', '--stdev', action='store_true', help=
'only plot standard deviations')
parser.add_argument('-r', metavar='residue', nargs='+', help=
'plot specific residues along time')
return parser.parse_args()
def die(s):
print >> sys.stderr, 'ERROR:', s
exit(1)
def show_usage():
print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'
def read_contrib(fname):
data = []
with open(fname, 'rt') as f:
for line in f:
split = line.split()
k = split[0]
counts = [int(c) for c in split[2:]]
data.append((k, counts))
return data
def med(x):
x = sorted(x)
length = len(x)
if not length % 2:
return (x[length / 2] + x[(length - 1) / 2]) / 2.0
return float(x[length / 2])
def plot_sd(data):
x = numpy.array([(i + 1) for i in range(len(data[0]))])
d = numpy.std(data[1], axis=1)
pyplot.bar(x, d)
pyplot.xticks(x + 0.5, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.xlim((x[0] - 1, x[-1] + 1))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution standard deviations')
def plot_barplot(data):
x = [(i + 1) for i in range(len(data[0]))]
pyplot.boxplot(data[1])
pyplot.xticks(x, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_args():
import argparse
import os.path
def isfile(path):
Error = argparse.ArgumentTypeError
if not os.path.exists(path):
raise Error("No such file: '{0}'".format(path))
elif not os.path.isfile(path):
raise Error("Not a valid file: '{0}'".format(path))
return path
hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)
parser.add_argument('filename', type=isfile, help='contribution data file')
parser.add_argument('-o', '--output', help='output file name')
parser.add_argument('-n', type=int, default=0, help=
'show n greatest contributions')
parser.add_argument('-s', '--stdev', action='store_true', help=
'only plot standard deviations')
parser.add_argument('-r', metavar='residue', nargs='+', help=
'plot specific residues along time')
return parser.parse_args()
def die(s):
print >> sys.stderr, 'ERROR:', s
exit(1)
def show_usage():
print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'
def read_contrib(fname):
data = []
with open(fname, 'rt') as f:
for line in f:
split = line.split()
k = split[0]
counts = [int(c) for c in split[2:]]
data.append((k, counts))
return data
def med(x):
x = sorted(x)
length = len(x)
if not length % 2:
return (x[length / 2] + x[(length - 1) / 2]) / 2.0
return float(x[length / 2])
def plot_sd(data):
x = numpy.array([(i + 1) for i in range(len(data[0]))])
d = numpy.std(data[1], axis=1)
pyplot.bar(x, d)
pyplot.xticks(x + 0.5, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.xlim((x[0] - 1, x[-1] + 1))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution standard deviations')
def plot_barplot(data):
x = [(i + 1) for i in range(len(data[0]))]
pyplot.boxplot(data[1])
pyplot.xticks(x, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution')
<|reserved_special_token_0|>
def main():
args = parse_args()
data = read_contrib(args.filename)
if args.n:
data = sorted(data, key=lambda x: med(x[1]), reverse=True)
data = data[:args.n]
data = zip(*data)
if args.r:
plot_residues(data, args.r)
elif args.stdev:
plot_sd(data)
else:
plot_barplot(data)
if args.output:
pyplot.savefig(args.output)
else:
pyplot.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
if sys.version < '2.7':
print >> sys.stderr, 'ERROR: This script requires Python 2.7.x. Please install it and try again.'
exit(1)
try:
import matplotlib.pyplot as pyplot
import numpy
except ImportError:
print >> sys.stderr, 'ERROR:'
print >> sys.stderr, 'This script requires matplotlib and numpy. Please make sure you installed it and that your PYTHONPATH is set adequately.'
exit(1)
def parse_args():
import argparse
import os.path
def isfile(path):
Error = argparse.ArgumentTypeError
if not os.path.exists(path):
raise Error("No such file: '{0}'".format(path))
elif not os.path.isfile(path):
raise Error("Not a valid file: '{0}'".format(path))
return path
hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)
parser.add_argument('filename', type=isfile, help='contribution data file')
parser.add_argument('-o', '--output', help='output file name')
parser.add_argument('-n', type=int, default=0, help=
'show n greatest contributions')
parser.add_argument('-s', '--stdev', action='store_true', help=
'only plot standard deviations')
parser.add_argument('-r', metavar='residue', nargs='+', help=
'plot specific residues along time')
return parser.parse_args()
def die(s):
print >> sys.stderr, 'ERROR:', s
exit(1)
def show_usage():
print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'
def read_contrib(fname):
data = []
with open(fname, 'rt') as f:
for line in f:
split = line.split()
k = split[0]
counts = [int(c) for c in split[2:]]
data.append((k, counts))
return data
def med(x):
x = sorted(x)
length = len(x)
if not length % 2:
return (x[length / 2] + x[(length - 1) / 2]) / 2.0
return float(x[length / 2])
def plot_sd(data):
x = numpy.array([(i + 1) for i in range(len(data[0]))])
d = numpy.std(data[1], axis=1)
pyplot.bar(x, d)
pyplot.xticks(x + 0.5, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.xlim((x[0] - 1, x[-1] + 1))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution standard deviations')
def plot_barplot(data):
x = [(i + 1) for i in range(len(data[0]))]
pyplot.boxplot(data[1])
pyplot.xticks(x, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0] - 10, ylim[1] + 10))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title('Residue contribution')
def plot_residues(data, residues):
def running_average(x, N):
return numpy.convolve(x, numpy.ones((N,)) / N)[N - 1:]
if 'all' in residues:
residues = data[0]
for r in residues:
try:
i = data[0].index(r)
except:
die("No residue named '{0}'".format(r))
y = data[1][i]
pyplot.plot(y, label=r)
pyplot.legend(loc='best')
def main():
args = parse_args()
data = read_contrib(args.filename)
if args.n:
data = sorted(data, key=lambda x: med(x[1]), reverse=True)
data = data[:args.n]
data = zip(*data)
if args.r:
plot_residues(data, args.r)
elif args.stdev:
plot_sd(data)
else:
plot_barplot(data)
if args.output:
pyplot.savefig(args.output)
else:
pyplot.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
"""This script draws a boxplot of each atom contribution to the cavity."""
import sys
if sys.version < "2.7":
print >> sys.stderr, "ERROR: This script requires Python 2.7.x. "\
"Please install it and try again."
exit(1)
try:
import matplotlib.pyplot as pyplot
import numpy
except ImportError:
print >> sys.stderr, "ERROR:",
print >> sys.stderr, "This script requires matplotlib and numpy. "\
"Please make sure you installed it and that "\
"your PYTHONPATH is set adequately."
exit(1)
def parse_args():
import argparse
import os.path
def isfile(path):
Error = argparse.ArgumentTypeError
if not os.path.exists(path):
raise Error("No such file: '{0}'".format(path))
elif not os.path.isfile(path):
raise Error("Not a valid file: '{0}'".format(path))
return path
hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)
parser.add_argument("filename", type=isfile,
help="contribution data file")
parser.add_argument("-o", "--output",
help="output file name")
parser.add_argument("-n", type=int, default=0,
help="show n greatest contributions")
parser.add_argument("-s", "--stdev", action="store_true",
help="only plot standard deviations")
parser.add_argument("-r", metavar="residue", nargs="+",
help="plot specific residues along time")
return parser.parse_args()
def die(s):
print >> sys.stderr, "ERROR:", s
exit(1)
def show_usage():
print >> sys.stderr, "usage: python " + sys.argv[0] + " <filename.dat>"
def read_contrib(fname):
data = []
with open(fname, "rt") as f:
for line in f:
split = line.split()
k = split[0]
counts = [int(c) for c in split[2:]]
data.append((k, counts))
return data
def med(x):
x = sorted(x)
length = len(x)
if not length % 2:
return (x[length / 2] + x[(length - 1) / 2]) / 2.0
return float(x[length / 2])
def plot_sd(data):
x = numpy.array([i+1 for i in range(len(data[0]))])
d = numpy.std(data[1], axis=1)
pyplot.bar(x, d)
pyplot.xticks(x+.5, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0]-10, ylim[1]+10))
pyplot.xlim((x[0]-1, x[-1]+1))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title("Residue contribution standard deviations")
def plot_barplot(data):
x = [i+1 for i in range(len(data[0]))]
pyplot.boxplot(data[1])
pyplot.xticks(x, data[0], rotation=90)
ylim = pyplot.ylim()
pyplot.ylim((ylim[0]-10, ylim[1]+10))
pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)
pyplot.title("Residue contribution")
def plot_residues(data, residues):
def running_average(x, N):
return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
if "all" in residues:
residues = data[0]
for r in residues:
try:
i = data[0].index(r)
except:
die("No residue named '{0}'".format(r))
# y = running_average(data[1][i], 5)
y = data[1][i]
pyplot.plot(y, label=r)
pyplot.legend(loc="best")
def main():
args = parse_args()
data = read_contrib(args.filename)
if args.n:
data = sorted(data, key=lambda x: med(x[1]), reverse=True)
data = data[:args.n]
data = zip(*data)
if args.r:
plot_residues(data, args.r)
elif args.stdev:
plot_sd(data)
else:
plot_barplot(data)
if args.output:
pyplot.savefig(args.output)
else:
pyplot.show()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "9fdcaf65f070b7081afd327442dd20e3284c71eb",
"index": 7905,
"step-1": "<mask token>\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\ndef plot_barplot(data):\n x = [(i + 1) for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\ndef plot_barplot(data):\n x = [(i + 1) for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution')\n\n\n<mask token>\n\n\ndef main():\n args = parse_args()\n data = read_contrib(args.filename)\n if args.n:\n data = sorted(data, key=lambda x: med(x[1]), reverse=True)\n data = data[:args.n]\n data = zip(*data)\n if args.r:\n plot_residues(data, args.r)\n elif args.stdev:\n plot_sd(data)\n else:\n plot_barplot(data)\n if args.output:\n pyplot.savefig(args.output)\n else:\n pyplot.show()\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport sys\nif sys.version < '2.7':\n print >> sys.stderr, 'ERROR: This script requires Python 2.7.x. Please install it and try again.'\n exit(1)\ntry:\n import matplotlib.pyplot as pyplot\n import numpy\nexcept ImportError:\n print >> sys.stderr, 'ERROR:'\n print >> sys.stderr, 'This script requires matplotlib and numpy. Please make sure you installed it and that your PYTHONPATH is set adequately.'\n exit(1)\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument('filename', type=isfile, help='contribution data file')\n parser.add_argument('-o', '--output', help='output file name')\n parser.add_argument('-n', type=int, default=0, help=\n 'show n greatest contributions')\n parser.add_argument('-s', '--stdev', action='store_true', help=\n 'only plot standard deviations')\n parser.add_argument('-r', metavar='residue', nargs='+', help=\n 'plot specific residues along time')\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, 'ERROR:', s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, 'usage: python ' + sys.argv[0] + ' <filename.dat>'\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, 'rt') as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([(i + 1) for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x + 0.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.xlim((x[0] - 1, x[-1] + 1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution standard deviations')\n\n\ndef plot_barplot(data):\n x = [(i + 1) for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0] - 10, ylim[1] + 10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title('Residue contribution')\n\n\ndef plot_residues(data, residues):\n\n def running_average(x, N):\n return numpy.convolve(x, numpy.ones((N,)) / N)[N - 1:]\n if 'all' in residues:\n residues = data[0]\n for r in residues:\n try:\n i = data[0].index(r)\n except:\n die(\"No residue named '{0}'\".format(r))\n y = data[1][i]\n pyplot.plot(y, label=r)\n pyplot.legend(loc='best')\n\n\ndef main():\n args = parse_args()\n data = read_contrib(args.filename)\n if args.n:\n data = sorted(data, key=lambda x: med(x[1]), reverse=True)\n data = data[:args.n]\n data = zip(*data)\n if args.r:\n plot_residues(data, args.r)\n elif args.stdev:\n plot_sd(data)\n else:\n plot_barplot(data)\n if args.output:\n pyplot.savefig(args.output)\n else:\n pyplot.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"This script draws a boxplot of each atom contribution to the cavity.\"\"\"\n\n\nimport sys\n\nif sys.version < \"2.7\":\n print >> sys.stderr, \"ERROR: This script requires Python 2.7.x. \"\\\n \"Please install it and try again.\"\n exit(1)\n\ntry:\n import matplotlib.pyplot as pyplot\n import numpy\nexcept ImportError:\n print >> sys.stderr, \"ERROR:\",\n print >> sys.stderr, \"This script requires matplotlib and numpy. \"\\\n \"Please make sure you installed it and that \"\\\n \"your PYTHONPATH is set adequately.\"\n exit(1)\n\n\ndef parse_args():\n import argparse\n import os.path\n\n def isfile(path):\n Error = argparse.ArgumentTypeError\n if not os.path.exists(path):\n raise Error(\"No such file: '{0}'\".format(path))\n elif not os.path.isfile(path):\n raise Error(\"Not a valid file: '{0}'\".format(path))\n return path\n\n hf = lambda prog: argparse.HelpFormatter(prog, max_help_position=50)\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=hf)\n parser.add_argument(\"filename\", type=isfile,\n help=\"contribution data file\")\n parser.add_argument(\"-o\", \"--output\",\n help=\"output file name\")\n parser.add_argument(\"-n\", type=int, default=0,\n help=\"show n greatest contributions\")\n parser.add_argument(\"-s\", \"--stdev\", action=\"store_true\",\n help=\"only plot standard deviations\")\n parser.add_argument(\"-r\", metavar=\"residue\", nargs=\"+\",\n help=\"plot specific residues along time\")\n return parser.parse_args()\n\n\ndef die(s):\n print >> sys.stderr, \"ERROR:\", s\n exit(1)\n\n\ndef show_usage():\n print >> sys.stderr, \"usage: python \" + sys.argv[0] + \" <filename.dat>\"\n\n\ndef read_contrib(fname):\n data = []\n with open(fname, \"rt\") as f:\n for line in f:\n split = line.split()\n k = split[0]\n counts = [int(c) for c in split[2:]]\n data.append((k, counts))\n return data\n\n\ndef med(x):\n x = sorted(x)\n length = len(x)\n if not length % 2:\n return (x[length / 2] + x[(length - 1) / 2]) / 2.0\n return float(x[length / 2])\n\n\ndef plot_sd(data):\n x = numpy.array([i+1 for i in range(len(data[0]))])\n d = numpy.std(data[1], axis=1)\n pyplot.bar(x, d)\n pyplot.xticks(x+.5, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0]-10, ylim[1]+10))\n pyplot.xlim((x[0]-1, x[-1]+1))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title(\"Residue contribution standard deviations\")\n\n\ndef plot_barplot(data):\n x = [i+1 for i in range(len(data[0]))]\n pyplot.boxplot(data[1])\n pyplot.xticks(x, data[0], rotation=90)\n ylim = pyplot.ylim()\n pyplot.ylim((ylim[0]-10, ylim[1]+10))\n pyplot.subplots_adjust(left=0.1, right=0.9, top=0.95, bottom=0.2)\n pyplot.title(\"Residue contribution\")\n\n\ndef plot_residues(data, residues):\n def running_average(x, N):\n return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]\n if \"all\" in residues:\n residues = data[0]\n for r in residues:\n try:\n i = data[0].index(r)\n except:\n die(\"No residue named '{0}'\".format(r))\n# y = running_average(data[1][i], 5)\n y = data[1][i]\n pyplot.plot(y, label=r)\n pyplot.legend(loc=\"best\")\n\n\ndef main():\n args = parse_args()\n data = read_contrib(args.filename)\n\n if args.n:\n data = sorted(data, key=lambda x: med(x[1]), reverse=True)\n data = data[:args.n]\n\n data = zip(*data)\n\n if args.r:\n plot_residues(data, args.r)\n elif args.stdev:\n plot_sd(data)\n else:\n plot_barplot(data)\n\n if args.output:\n pyplot.savefig(args.output)\n else:\n pyplot.show()\n\n\nif __name__ == 
'__main__':\n main()\n",
"step-ids": [
6,
7,
8,
11,
12
]
}
|
[
6,
7,
8,
11,
12
] |
<|reserved_special_token_0|>
class Ui_Rec1(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Rec1(object):
<|reserved_special_token_0|>
def retranslateUi(self, Rec1):
_translate = QtCore.QCoreApplication.translate
Rec1.setWindowTitle(_translate('Rec1', 'Recognition'))
self.pushButton_photo.setText(_translate('Rec1',
'Распознавание по фото'))
self.pushButton_video.setText(_translate('Rec1',
'Распознавие по видео'))
self.pushButton_camera.setText(_translate('Rec1',
'Распознавание с помощью веб-камеры'))
self.pushButton_back.setText(_translate('Rec1', 'Назад'))
self.pushButton_exit.setText(_translate('Rec1', 'Выход'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Rec1(object):
def setupUi(self, Rec1):
Rec1.setObjectName('Rec1')
Rec1.setFixedSize(450, 200)
ico = QtGui.QIcon('mylogo.png')
Rec1.setWindowIcon(ico)
font = QtGui.QFont()
font.setFamily('Times New Roman')
font.setPointSize(14)
self.centralwidget = QtWidgets.QWidget(Rec1)
self.centralwidget.setObjectName('centralwidget')
self.pushButton_photo = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_photo.setGeometry(QtCore.QRect(50, 20, 350, 30))
self.pushButton_photo.setFont(font)
self.pushButton_photo.setObjectName('pushButton_photo')
self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_video.setGeometry(QtCore.QRect(50, 60, 350, 30))
self.pushButton_video.setFont(font)
self.pushButton_video.setObjectName('pushButton_video')
self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_camera.setGeometry(QtCore.QRect(50, 100, 350, 30))
self.pushButton_camera.setFont(font)
self.pushButton_camera.setObjectName('pushButton_camera')
self.pushButton_back = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_back.setGeometry(QtCore.QRect(50, 140, 170, 30))
self.pushButton_back.setFont(font)
self.pushButton_back.setObjectName('pushButton_back')
self.pushButton_exit = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_exit.setGeometry(QtCore.QRect(230, 140, 170, 30))
self.pushButton_exit.setFont(font)
self.pushButton_exit.setObjectName('pushButton_exit')
Rec1.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Rec1)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName('menubar')
Rec1.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Rec1)
self.statusbar.setObjectName('statusbar')
Rec1.setStatusBar(self.statusbar)
self.retranslateUi(Rec1)
QtCore.QMetaObject.connectSlotsByName(Rec1)
def retranslateUi(self, Rec1):
_translate = QtCore.QCoreApplication.translate
Rec1.setWindowTitle(_translate('Rec1', 'Recognition'))
self.pushButton_photo.setText(_translate('Rec1',
'Распознавание по фото'))
self.pushButton_video.setText(_translate('Rec1',
'Распознавие по видео'))
self.pushButton_camera.setText(_translate('Rec1',
'Распознавание с помощью веб-камеры'))
self.pushButton_back.setText(_translate('Rec1', 'Назад'))
self.pushButton_exit.setText(_translate('Rec1', 'Выход'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Rec1(object):
def setupUi(self, Rec1):
Rec1.setObjectName('Rec1')
Rec1.setFixedSize(450, 200)
ico = QtGui.QIcon('mylogo.png')
Rec1.setWindowIcon(ico)
font = QtGui.QFont()
font.setFamily('Times New Roman')
font.setPointSize(14)
self.centralwidget = QtWidgets.QWidget(Rec1)
self.centralwidget.setObjectName('centralwidget')
self.pushButton_photo = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_photo.setGeometry(QtCore.QRect(50, 20, 350, 30))
self.pushButton_photo.setFont(font)
self.pushButton_photo.setObjectName('pushButton_photo')
self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_video.setGeometry(QtCore.QRect(50, 60, 350, 30))
self.pushButton_video.setFont(font)
self.pushButton_video.setObjectName('pushButton_video')
self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_camera.setGeometry(QtCore.QRect(50, 100, 350, 30))
self.pushButton_camera.setFont(font)
self.pushButton_camera.setObjectName('pushButton_camera')
self.pushButton_back = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_back.setGeometry(QtCore.QRect(50, 140, 170, 30))
self.pushButton_back.setFont(font)
self.pushButton_back.setObjectName('pushButton_back')
self.pushButton_exit = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_exit.setGeometry(QtCore.QRect(230, 140, 170, 30))
self.pushButton_exit.setFont(font)
self.pushButton_exit.setObjectName('pushButton_exit')
Rec1.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Rec1)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName('menubar')
Rec1.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Rec1)
self.statusbar.setObjectName('statusbar')
Rec1.setStatusBar(self.statusbar)
self.retranslateUi(Rec1)
QtCore.QMetaObject.connectSlotsByName(Rec1)
def retranslateUi(self, Rec1):
_translate = QtCore.QCoreApplication.translate
Rec1.setWindowTitle(_translate('Rec1', 'Recognition'))
self.pushButton_photo.setText(_translate('Rec1',
'Распознавание по фото'))
self.pushButton_video.setText(_translate('Rec1',
'Распознавие по видео'))
self.pushButton_camera.setText(_translate('Rec1',
'Распознавание с помощью веб-камеры'))
self.pushButton_back.setText(_translate('Rec1', 'Назад'))
self.pushButton_exit.setText(_translate('Rec1', 'Выход'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Rec1(object):
def setupUi(self, Rec1):
Rec1.setObjectName("Rec1")
Rec1.setFixedSize(450, 200)
ico = QtGui.QIcon("mylogo.png")
Rec1.setWindowIcon(ico)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(14)
self.centralwidget = QtWidgets.QWidget(Rec1)
self.centralwidget.setObjectName("centralwidget")
self.pushButton_photo = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_photo.setGeometry(QtCore.QRect(50, 20, 350, 30))
self.pushButton_photo.setFont(font)
self.pushButton_photo.setObjectName("pushButton_photo")
self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_video.setGeometry(QtCore.QRect(50, 60, 350, 30))
self.pushButton_video.setFont(font)
self.pushButton_video.setObjectName("pushButton_video")
self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_camera.setGeometry(QtCore.QRect(50, 100, 350, 30))
self.pushButton_camera.setFont(font)
self.pushButton_camera.setObjectName("pushButton_camera")
self.pushButton_back = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_back.setGeometry(QtCore.QRect(50, 140, 170, 30))
self.pushButton_back.setFont(font)
self.pushButton_back.setObjectName("pushButton_back")
self.pushButton_exit = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_exit.setGeometry(QtCore.QRect(230, 140, 170, 30))
self.pushButton_exit.setFont(font)
self.pushButton_exit.setObjectName("pushButton_exit")
Rec1.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Rec1)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
Rec1.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Rec1)
self.statusbar.setObjectName("statusbar")
Rec1.setStatusBar(self.statusbar)
self.retranslateUi(Rec1)
QtCore.QMetaObject.connectSlotsByName(Rec1)
def retranslateUi(self, Rec1):
_translate = QtCore.QCoreApplication.translate
Rec1.setWindowTitle(_translate("Rec1", "Recognition"))
self.pushButton_photo.setText(_translate("Rec1", "Распознавание по фото"))
self.pushButton_video.setText(_translate("Rec1", "Распознавие по видео"))
self.pushButton_camera.setText(_translate("Rec1", "Распознавание с помощью веб-камеры"))
self.pushButton_back.setText(_translate("Rec1", "Назад"))
self.pushButton_exit.setText(_translate("Rec1", "Выход"))
|
flexible
|
{
"blob_id": "c500ecaa66672ac960dc548c3f3882e4bc196745",
"index": 6870,
"step-1": "<mask token>\n\n\nclass Ui_Rec1(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Rec1(object):\n <mask token>\n\n def retranslateUi(self, Rec1):\n _translate = QtCore.QCoreApplication.translate\n Rec1.setWindowTitle(_translate('Rec1', 'Recognition'))\n self.pushButton_photo.setText(_translate('Rec1',\n 'Распознавание по фото'))\n self.pushButton_video.setText(_translate('Rec1',\n 'Распознавие по видео'))\n self.pushButton_camera.setText(_translate('Rec1',\n 'Распознавание с помощью веб-камеры'))\n self.pushButton_back.setText(_translate('Rec1', 'Назад'))\n self.pushButton_exit.setText(_translate('Rec1', 'Выход'))\n",
"step-3": "<mask token>\n\n\nclass Ui_Rec1(object):\n\n def setupUi(self, Rec1):\n Rec1.setObjectName('Rec1')\n Rec1.setFixedSize(450, 200)\n ico = QtGui.QIcon('mylogo.png')\n Rec1.setWindowIcon(ico)\n font = QtGui.QFont()\n font.setFamily('Times New Roman')\n font.setPointSize(14)\n self.centralwidget = QtWidgets.QWidget(Rec1)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton_photo = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_photo.setGeometry(QtCore.QRect(50, 20, 350, 30))\n self.pushButton_photo.setFont(font)\n self.pushButton_photo.setObjectName('pushButton_photo')\n self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_video.setGeometry(QtCore.QRect(50, 60, 350, 30))\n self.pushButton_video.setFont(font)\n self.pushButton_video.setObjectName('pushButton_video')\n self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_camera.setGeometry(QtCore.QRect(50, 100, 350, 30))\n self.pushButton_camera.setFont(font)\n self.pushButton_camera.setObjectName('pushButton_camera')\n self.pushButton_back = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_back.setGeometry(QtCore.QRect(50, 140, 170, 30))\n self.pushButton_back.setFont(font)\n self.pushButton_back.setObjectName('pushButton_back')\n self.pushButton_exit = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_exit.setGeometry(QtCore.QRect(230, 140, 170, 30))\n self.pushButton_exit.setFont(font)\n self.pushButton_exit.setObjectName('pushButton_exit')\n Rec1.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Rec1)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\n self.menubar.setObjectName('menubar')\n Rec1.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Rec1)\n self.statusbar.setObjectName('statusbar')\n Rec1.setStatusBar(self.statusbar)\n self.retranslateUi(Rec1)\n QtCore.QMetaObject.connectSlotsByName(Rec1)\n\n def retranslateUi(self, Rec1):\n _translate = QtCore.QCoreApplication.translate\n Rec1.setWindowTitle(_translate('Rec1', 'Recognition'))\n self.pushButton_photo.setText(_translate('Rec1',\n 'Распознавание по фото'))\n self.pushButton_video.setText(_translate('Rec1',\n 'Распознавие по видео'))\n self.pushButton_camera.setText(_translate('Rec1',\n 'Распознавание с помощью веб-камеры'))\n self.pushButton_back.setText(_translate('Rec1', 'Назад'))\n self.pushButton_exit.setText(_translate('Rec1', 'Выход'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Rec1(object):\n\n def setupUi(self, Rec1):\n Rec1.setObjectName('Rec1')\n Rec1.setFixedSize(450, 200)\n ico = QtGui.QIcon('mylogo.png')\n Rec1.setWindowIcon(ico)\n font = QtGui.QFont()\n font.setFamily('Times New Roman')\n font.setPointSize(14)\n self.centralwidget = QtWidgets.QWidget(Rec1)\n self.centralwidget.setObjectName('centralwidget')\n self.pushButton_photo = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_photo.setGeometry(QtCore.QRect(50, 20, 350, 30))\n self.pushButton_photo.setFont(font)\n self.pushButton_photo.setObjectName('pushButton_photo')\n self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_video.setGeometry(QtCore.QRect(50, 60, 350, 30))\n self.pushButton_video.setFont(font)\n self.pushButton_video.setObjectName('pushButton_video')\n self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_camera.setGeometry(QtCore.QRect(50, 100, 350, 30))\n self.pushButton_camera.setFont(font)\n self.pushButton_camera.setObjectName('pushButton_camera')\n self.pushButton_back = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_back.setGeometry(QtCore.QRect(50, 140, 170, 30))\n self.pushButton_back.setFont(font)\n self.pushButton_back.setObjectName('pushButton_back')\n self.pushButton_exit = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_exit.setGeometry(QtCore.QRect(230, 140, 170, 30))\n self.pushButton_exit.setFont(font)\n self.pushButton_exit.setObjectName('pushButton_exit')\n Rec1.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Rec1)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\n self.menubar.setObjectName('menubar')\n Rec1.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Rec1)\n self.statusbar.setObjectName('statusbar')\n Rec1.setStatusBar(self.statusbar)\n self.retranslateUi(Rec1)\n QtCore.QMetaObject.connectSlotsByName(Rec1)\n\n def retranslateUi(self, Rec1):\n _translate = QtCore.QCoreApplication.translate\n Rec1.setWindowTitle(_translate('Rec1', 'Recognition'))\n self.pushButton_photo.setText(_translate('Rec1',\n 'Распознавание по фото'))\n self.pushButton_video.setText(_translate('Rec1',\n 'Распознавие по видео'))\n self.pushButton_camera.setText(_translate('Rec1',\n 'Распознавание с помощью веб-камеры'))\n self.pushButton_back.setText(_translate('Rec1', 'Назад'))\n self.pushButton_exit.setText(_translate('Rec1', 'Выход'))\n",
"step-5": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Rec1(object):\n def setupUi(self, Rec1):\n Rec1.setObjectName(\"Rec1\")\n Rec1.setFixedSize(450, 200)\n ico = QtGui.QIcon(\"mylogo.png\")\n Rec1.setWindowIcon(ico)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(14)\n \n self.centralwidget = QtWidgets.QWidget(Rec1)\n self.centralwidget.setObjectName(\"centralwidget\")\n \n self.pushButton_photo = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_photo.setGeometry(QtCore.QRect(50, 20, 350, 30))\n self.pushButton_photo.setFont(font)\n self.pushButton_photo.setObjectName(\"pushButton_photo\")\n \n self.pushButton_video = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_video.setGeometry(QtCore.QRect(50, 60, 350, 30))\n self.pushButton_video.setFont(font)\n self.pushButton_video.setObjectName(\"pushButton_video\")\n \n self.pushButton_camera = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_camera.setGeometry(QtCore.QRect(50, 100, 350, 30))\n self.pushButton_camera.setFont(font)\n self.pushButton_camera.setObjectName(\"pushButton_camera\")\n \n self.pushButton_back = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_back.setGeometry(QtCore.QRect(50, 140, 170, 30))\n self.pushButton_back.setFont(font)\n self.pushButton_back.setObjectName(\"pushButton_back\")\n \n self.pushButton_exit = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_exit.setGeometry(QtCore.QRect(230, 140, 170, 30))\n self.pushButton_exit.setFont(font)\n self.pushButton_exit.setObjectName(\"pushButton_exit\")\n\n Rec1.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Rec1)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\n self.menubar.setObjectName(\"menubar\")\n Rec1.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Rec1)\n self.statusbar.setObjectName(\"statusbar\")\n Rec1.setStatusBar(self.statusbar)\n\n self.retranslateUi(Rec1)\n QtCore.QMetaObject.connectSlotsByName(Rec1)\n\n def retranslateUi(self, Rec1):\n _translate = QtCore.QCoreApplication.translate\n Rec1.setWindowTitle(_translate(\"Rec1\", \"Recognition\"))\n self.pushButton_photo.setText(_translate(\"Rec1\", \"Распознавание по фото\"))\n self.pushButton_video.setText(_translate(\"Rec1\", \"Распознавие по видео\"))\n self.pushButton_camera.setText(_translate(\"Rec1\", \"Распознавание с помощью веб-камеры\"))\n self.pushButton_back.setText(_translate(\"Rec1\", \"Назад\"))\n self.pushButton_exit.setText(_translate(\"Rec1\", \"Выход\"))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cv2
import sys
# Load the Haar cascades
face_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')
capture = cv2.VideoCapture(0)
_, image = capture.read()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
capture.release()
cv2.destroyAllWindows()
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) >= 1:
sys.stdout.write("1")
else:
sys.stdout.write("0")
|
normal
|
{
"blob_id": "4d707e23f66e8b6bea05a5901d3d8e459247c6c1",
"index": 3840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncapture.release()\ncv2.destroyAllWindows()\n<mask token>\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-3": "<mask token>\nface_cascade = cv2.CascadeClassifier(\n './haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncapture.release()\ncv2.destroyAllWindows()\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-4": "import cv2\nimport sys\nface_cascade = cv2.CascadeClassifier(\n './haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncapture.release()\ncv2.destroyAllWindows()\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write('1')\nelse:\n sys.stdout.write('0')\n",
"step-5": "import cv2\nimport sys\n\n# Load the Haar cascades\nface_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_frontalface_default.xml')\neyes_cascade = cv2.CascadeClassifier('./haar_cascades/haarcascade_eye.xml')\n\ncapture = cv2.VideoCapture(0)\n_, image = capture.read()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\ncapture.release()\ncv2.destroyAllWindows()\n\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\nif len(faces) >= 1:\n sys.stdout.write(\"1\")\nelse:\n sys.stdout.write(\"0\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''Module for generating and plotting networks.'''
from trafpy.generator.src import tools
import copy
import networkx as nx
import matplotlib.pyplot as plt
import json
def gen_arbitrary_network(num_eps,
ep_label=None,
ep_capacity=12500,
num_channels=1,
racks_dict=None,
topology_type=None):
'''Generates an arbitrary network with num_eps nodes labelled as ep_label.
    Note that no edges are formed in this network; it is purely for endpoint
    name indexing purposes when using the Demand class. This is useful where
    you want to use the Demand class but not necessarily with a carefully
    crafted networkx graph that accurately mimics the network you will use
    for the demands.
Args:
num_eps (int): Number of endpoints in network.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
ep_capacity (int, float): Byte capacity per end point channel.
num_channels (int, float): Number of channels on each link in network.
racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are lists of end points. If None, assume there is no
            clustering/rack system in the network, i.e. end points are not grouped
            into different clusters/racks.
Returns:
networkx graph: network object
'''
network = nx.Graph()
network.add_nodes_from([node for node in range(num_eps)])
if ep_label is None:
        # labels must be str, otherwise the network is not json serialisable
servers = [str(i) for i in range(num_eps)]
else:
servers = [ep_label+'_'+str(i) for i in range(num_eps)]
relabel_mapping = {node: label for node, label in zip(range(num_eps),servers)}
network = nx.relabel_nodes(network, relabel_mapping)
eps = []
for node in list(network.nodes):
try:
if ep_label in node:
eps.append(node)
except TypeError:
# ep_label is None
eps.append(node)
network.graph['endpoints'] = eps
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2
if topology_type is None:
topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(num_eps, ep_capacity, num_channels)
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=ep_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label],
racks_dict=racks_dict,
topology_type=topology_type)
return network
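    # Example usage (illustrative): gen_arbitrary_network(num_eps=4, ep_label='server')
    # returns a network whose endpoints are ['server_0', ..., 'server_3'] and,
    # with the default ep_capacity=12500 and num_channels=1,
    # max_nw_capacity = (4 * 12500 * 1) / 2 = 25000.0.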
def gen_nsfnet_network(ep_label='server',
rack_label='rack',
N=0,
num_channels=2,
server_to_rack_channel_capacity=1,
rack_to_rack_channel_capacity=10,
show_fig=False):
'''Generates the standard 14-node NSFNET topology (a U.S. core network).
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        N (int): Number of servers per rack. If 0, assume all nodes in the
            NSFNET are endpoints.
num_channels (int,float): Number of channels on each link in network.
server_to_rack_channel_capacity (int,float): Byte capacity per channel
between servers and ToR switch.
rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
'''
channel_names = gen_channel_names(num_channels)
network = nx.Graph()
node_pair_list = [[0,1],
[0,3],
[0,2],
[1,2],
[1,7],
[3,8],
[3,4],
[3,6],
[4,5],
[4,5],
[5,2],
[5,13],
[5,12],
[6,7],
[7,10],
[8,11],
[8,9],
[9,10],
[9,12],
[10,11],
[10,13],
[11,12]]
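    # NB: the pair [4,5] appears twice above; nx.Graph collapses duplicate
    # edges, so the base topology has 21 unique links.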
if N == 0:
# above nodes are all end points
label = ep_label
else:
# above nodes are ToR switch nodes
label = rack_label
for idx in range(len(node_pair_list)):
node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
# add 14 nodes
for edge in node_pair_list:
network.add_edge(*tuple(edge))
if N == 0:
# assume all nodes are servers
racks_dict = None
else:
# each of 14 nodes in NSFNET is a ToR switch
i = 0
racks_dict = {rack: [] for rack in range(14)}
for rack in range(14):
for server in range(N):
racks_dict[rack].append(ep_label+'_'+str(i))
network.add_edge(ep_label+'_'+str(i), rack_label+'_'+str(rack))
i += 1
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names, rack_to_rack_channel_capacity)
    # set global network attrs
network.graph['endpoints'] = get_endpoints(network, ep_label)
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (len(network.edges) * num_channels * rack_to_rack_channel_capacity) / 2
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label, rack_label],
topology_type='14_node_nsfnet',
racks_dict=racks_dict)
if show_fig:
plot_network(network, show_fig=True)
return network
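    # Example usage (a minimal sketch): gen_nsfnet_network(N=0) treats all
    # 14 NSFNET nodes as endpoints, so net.graph['endpoints'] has 14 entries.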
def gen_simple_network(ep_label='server',
num_channels=2,
server_to_rack_channel_capacity=500,
show_fig=False):
    '''Generates a very simple 5-node topology.
Args:
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
show_fig (bool): Whether or not to plot and show fig. If True, will
display fig.
Returns:
networkx graph: network object
'''
network = nx.Graph()
network.add_nodes_from([node for node in range(5)])
network.add_edges_from([(0,1),
(0,2),
(1,2),
(2,4),
(4,3),
(3,1)],weight=1)
servers = [ep_label+'_'+str(i) for i in range(5)]
relabel_mapping = {node: label for node, label in zip(range(5),servers)}
network = nx.relabel_nodes(network, relabel_mapping)
channel_names = gen_channel_names(num_channels)
edges = [edge for edge in network.edges]
add_edges_capacity_attrs(network, edges, channel_names, server_to_rack_channel_capacity)
    # set global network attrs
network.graph['endpoints'] = get_endpoints(network, ep_label)
# /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
max_nw_capacity = (len(network.edges) * num_channels * server_to_rack_channel_capacity) / 2
init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=[ep_label],
topology_type='5_node_simple_network')
if show_fig:
plot_network(network, show_fig=True)
return network
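    # With the defaults above: 6 edges * 2 channels * 500 B per channel
    # gives max_nw_capacity = 6000 / 2 = 3000.0.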
def get_endpoints(network, ep_label):
'''Gets list of endpoints of network.
Args:
network (networkx graph): Networkx object.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
Returns:
eps (list): List of endpoints.
'''
eps = []
for node in list(network.nodes):
if ep_label in node:
eps.append(node)
return eps
def gen_fat_tree(k=4,
L=2,
n=4,
ep_label='server',
rack_label='rack',
edge_label='edge',
aggregate_label='agg',
core_label='core',
num_channels = 2,
server_to_rack_channel_capacity=500,
rack_to_edge_channel_capacity=1000,
edge_to_agg_channel_capacity=1000,
agg_to_core_channel_capacity=2000,
rack_to_core_channel_capacity=2000,
show_fig=False):
'''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).
Top layer is always core (spine) switch layer, bottom layer is always
ToR (leaf) layer.
L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
Resource for building (scroll down to summary table with equations):
https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
Another good resource for data centre topologies etc. in general:
https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.
Parameters of network:
- number of core (spine) switches = (k/2)^(L/2) (top layer)
- number of edge switches (if L=4) = (k^2)/2
- number of agg switches (if L=4) = (k^2)/2
- number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
- number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
- number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
- number of servers = number ToR switches * n
Args:
k (int): Number of ports (links) on each switch (both up and down).
L (int): Number of layers in the fat tree.
n (int): Number of server per rack.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
edge_label (str,int): Label to assign to edge switch nodes
aggregate_label (str,int): Label to assign to edge switch nodes
core_label (str,int): Label to assign to core switch nodes
num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
        rack_to_edge_channel_capacity (int,float): (if L==4) Byte capacity per channel
        edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
        agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
        rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
Returns:
networkx graph: network object
'''
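    # Worked example of the above equations for k=4, L=4 (illustrative):
    #   cores = (4/2)^(4/2) = 4, edge switches = agg switches = (4^2)/2 = 8,
    #   racks = 2*(4/2)^(4-1) = 16, servers = 16*n.
    # The code below groups the edge and agg switches into k pods, each
    # holding k/2 edge and k/2 agg switches.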
if L != 2 and L != 4:
raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))
if k % 2 != 0:
raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))
channel_names = gen_channel_names(num_channels)
# initialise network nodes
if L == 2:
node_labels = [ep_label, rack_label, core_label]
else:
node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]
    num_cores = int((k/2)**(L/2))
num_aggs = int((k**2)/2)
num_edges = int((k**2)/2)
num_pods = int(2*(k/2)**(L-2))
num_racks = int(2*(k/2)**(L-1))
num_servers = int(num_racks * n)
cores = [core_label+'_'+str(i) for i in range(num_cores)]
aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]
edges = [edge_label+'_'+str(i) for i in range(num_edges)]
racks = [rack_label+'_'+str(i) for i in range(num_racks)]
servers = [ep_label+'_'+str(i) for i in range(num_servers)]
# create core and rack layer networks
core_layer = nx.Graph()
rack_layer = nx.Graph()
core_layer.add_nodes_from(cores)
rack_layer.add_nodes_from(racks)
# combine cores and racks into single network
fat_tree_network = nx.compose(core_layer, rack_layer)
if L == 2:
# 2 layers: Core, ToR
# link racks to cores, add link attributes
rack_iterator = iter(racks)
for rack in racks:
core_iterator = iter(cores)
# have k/2 up-ports on each switch
for up_port in range(int(k/2)):
core = next(core_iterator)
fat_tree_network.add_edge(rack, core)
add_edge_capacity_attrs(fat_tree_network,
(rack, core),
channel_names,
rack_to_core_channel_capacity)
else:
# 4 layers: Core, Agg, Edge, ToR. Agg and Edge switches grouped into pods.
# group edges and aggregates into pods
num_pods = int(k)
pods = [[] for i in range(num_pods)]
prev_iter = 0
for pod_iter in range(len(pods)):
curr_iter = int(prev_iter + (k/2))
pods[pod_iter].append(edges[prev_iter:curr_iter])
pods[pod_iter].append(aggs[prev_iter:curr_iter])
prev_iter = curr_iter
# create dict of pod networks
pod_labels = ['pod_'+str(i) for i in range(num_pods)]
pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
for pod_iter in range(num_pods):
key = ('pod_'+str(pod_iter),)
pod_edges = pods[pod_iter][0]
pod_aggs = pods[pod_iter][1]
pods_dict[key].add_nodes_from(pod_edges)
pods_dict[key].add_nodes_from(pod_aggs)
# connect edge and aggregate switches within pod, add link attributes
for pod_edge in pod_edges:
for pod_agg in pod_aggs:
pods_dict[key].add_edge(pod_agg, pod_edge)
add_edge_capacity_attrs(pods_dict[key],
(pod_agg,pod_edge),
channel_names,
edge_to_agg_channel_capacity)
# add pods (agg + edge) layer to fat-tree
pod_networks = list(pods_dict.values())
for pod_iter in range(num_pods):
fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])
# link aggregate switches in pods to core switches, add link attributes
for pod_iter in range(num_pods):
pod_aggs = pods[pod_iter][1]
core_iterator = iter(cores)
for pod_agg in pod_aggs:
while fat_tree_network.degree[pod_agg] < k:
core = next(core_iterator)
fat_tree_network.add_edge(core, pod_agg)
add_edge_capacity_attrs(fat_tree_network,
(core,pod_agg),
channel_names,
agg_to_core_channel_capacity)
# link edge switches in pods to racks, add link attributes
rack_iterator = iter(racks)
for pod_iter in range(num_pods):
pod_edges = pods[pod_iter][0]
for pod_edge in pod_edges:
while fat_tree_network.degree[pod_edge] < k:
rack = next(rack_iterator)
fat_tree_network.add_edge(pod_edge, rack)
add_edge_capacity_attrs(fat_tree_network,
(pod_edge,rack),
channel_names,
rack_to_edge_channel_capacity)
# link servers to racks, add link attributes
racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack
server_iterator = iter(servers)
for rack in racks:
for _ in range(n):
server = next(server_iterator)
fat_tree_network.add_edge(rack, server)
add_edge_capacity_attrs(fat_tree_network,
(rack, server),
channel_names,
server_to_rack_channel_capacity)
racks_dict[rack].append(server)
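    # e.g. with n=4, racks_dict['rack_0'] == ['server_0', 'server_1', 'server_2', 'server_3']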
# calc total network capacity
    # /= 2 to get max theoretical capacity (number of units the network can transfer per unit time)
max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2
# init global network attrs
fat_tree_network.graph['endpoints'] = servers
init_global_network_attrs(fat_tree_network,
max_nw_capacity,
num_channels,
ep_link_capacity=server_to_rack_channel_capacity*num_channels,
endpoint_label=ep_label,
node_labels=node_labels,
topology_type='fat_tree',
racks_dict=racks_dict)
if show_fig:
plot_network(fat_tree_network, show_fig=True)
return fat_tree_network
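# Minimal usage sketch for gen_fat_tree (numbers follow the formulas above):
#   net = gen_fat_tree(k=4, L=4, n=4)
#   len(net.graph['endpoints'])  # -> 64 servers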
def init_global_network_attrs(network,
max_nw_capacity,
num_channels,
ep_link_capacity,
endpoint_label = 'server',
topology_type='unknown',
node_labels=['server'],
racks_dict=None):
'''Initialises the standard global network attributes of a given network.
Args:
network (obj): NetworkX object.
max_nw_capacity (int/float): Maximum rate at which info can be reliably
transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        ep_link_capacity (int/float): Total capacity of each endpoint link
            (number of channels * per-channel capacity).
        endpoint_label (str): Label prepended to endpoint node names (e.g. 'server').
topology_type (str): Label of network topology (e.g. 'fat_tree').
node_labels (list): Label classes assigned to network nodes
(e.g. ['server', 'rack', 'edge']).
racks_dict (dict): Which servers/endpoints are in which rack. If None,
assume do not have rack system where have multiple servers in one
rack.
'''
network.graph['endpoint_label'] = endpoint_label
network.graph['num_channels_per_link'] = num_channels
network.graph['ep_link_capacity'] = ep_link_capacity
network.graph['ep_link_port_capacity'] = ep_link_capacity / 2 # all eps have a src & a dst port
network.graph['max_nw_capacity'] = max_nw_capacity
network.graph['curr_nw_capacity_used'] = 0
network.graph['num_active_connections'] = 0
network.graph['total_connections_blocked'] = 0
network.graph['node_labels'] = node_labels
network.graph['topology_type'] = topology_type
network.graph['channel_names'] = gen_channel_names(num_channels)
    # ensure racks dict keys/values are str so the graph is JSON-serialisable
if racks_dict is not None:
_racks_dict = {}
for key, val in racks_dict.items():
_racks_dict[str(key)] = []
for v in val:
_racks_dict[str(key)].append(str(v))
network.graph['rack_to_ep_dict'] = _racks_dict
else:
network.graph['rack_to_ep_dict'] = None
if racks_dict is not None:
# switch racks_dict keys and values to make hashing easier
ep_to_rack_dict = {}
for key, val in _racks_dict.items():
for v in val:
if v not in ep_to_rack_dict.keys():
ep_to_rack_dict[v] = key
network.graph['ep_to_rack_dict'] = ep_to_rack_dict
else:
network.graph['ep_to_rack_dict'] = None
def gen_channel_names(num_channels):
'''Generates channel names for channels on each link in network.'''
channels = [channel+1 for channel in range(num_channels)]
channel_names = ['channel_' + str(channel) for channel in channels]
return channel_names
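# e.g. gen_channel_names(2) -> ['channel_1', 'channel_2']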
def add_edge_capacity_attrs(network,
edge,
channel_names,
channel_capacity,
bidirectional_links=True):
    '''Adds channels and corresponding max channel bytes to a single edge in network.
Args:
        network (networkx graph): Network containing edges to which attrs will
be added.
edge (tuple): Node-node edge pair.
channel_names (list): List of channel names to add to edge.
channel_capacity (int,float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
            which are treated separately for incoming and outgoing traffic to and
            from a given node (switch or server).
'''
if bidirectional_links:
attrs = {edge:
{'{}_to_{}_port'.format(edge[0], edge[1]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
},
'{}_to_{}_port'.format(edge[1], edge[0]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
}
}
}
else:
attrs = {edge:
{'channels': {channel: channel_capacity for channel in channel_names},
'max_channel_capacity': channel_capacity}}
nx.set_edge_attributes(network, attrs)
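    # e.g. afterwards, with bidirectional_links=True and edge=(u, v):
    # network[u][v]['{}_to_{}_port'.format(u, v)]['max_channel_capacity'] == channel_capacity/2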
def add_edges_capacity_attrs(network,
edges,
channel_names,
channel_capacity,
bidirectional_links=True):
    '''Adds channels & max channel capacities to a list of edges in network.
To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
would index the network with network[0][1]
To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
would do network[0][1]['channels']['channel_1']
OR
if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
of the link you want to access.
Args:
network (networkx graph): Network containing edges to which attrs will
be added.
edges (list): List of node pairs in tuples.
channel_names (list of str): List of channel names to add to edge.
channel_capacity (int, float): Capacity to allocate to each channel.
bidirectional_links (bool): If True, each link has capacity split equally
between src and dst port. I.e. all links have a src and dst port
            which are treated separately for incoming and outgoing traffic to and
            from a given node (switch or server).
'''
if bidirectional_links:
attrs = {edge:
{'{}_to_{}_port'.format(edge[0], edge[1]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
},
'{}_to_{}_port'.format(edge[1], edge[0]):
{'channels':
{channel: channel_capacity/2 for channel in channel_names},
'max_channel_capacity': channel_capacity/2
}
}
for edge in edges}
else:
attrs = {edge:
{'channels':
{channel: channel_capacity for channel in channel_names},
'max_channel_capacity':
channel_capacity
} for edge in edges}
nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=[]):
    '''Gets dict where keys are node types and values are lists of nodes of each type in the graph.'''
network_nodes = []
for network_node in network.nodes:
network_nodes.append(network_node)
network_nodes_dict = {node_type: [] for node_type in node_types}
for n in network_nodes:
for node_type in node_types:
if node_type in n:
network_nodes_dict[node_type].append(n)
else:
# not this node type
pass
return network_nodes_dict
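# N.B. the membership test above matches node types by substring, e.g.
# get_node_type_dict(net, ['rack']) -> {'rack': ['rack_0', 'rack_1', ...]}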
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
'''Gets networkx positions of nodes in fat tree network for plotting.'''
pos = {}
node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
node_types = list(node_type_dict.keys())
    heights = {} # dict for height separation between fat tree layers
widths = {} # dict for width separation between nodes within layers
h = iter([1, 2, 3, 4, 5]) # server, rack, edge, agg, core heights
for node_type in node_types:
heights[node_type] = next(h)
widths[node_type] = 1/(len(node_type_dict[node_type])+1)
idx = 0
for node in node_type_dict[node_type]:
            pos[node] = ((idx+1)*widths[node_type]*width_scale, heights[node_type]*height_scale)
idx += 1
return pos
def init_network_node_positions(net):
'''Initialises network node positions for plotting.'''
if net.graph['topology_type'] == 'fat_tree':
pos = get_fat_tree_positions(net)
else:
pos = nx.nx_agraph.graphviz_layout(net, prog='neato')
return pos
def plot_network(network,
draw_node_labels=True,
ep_label='server',
network_node_size=2000,
font_size=30,
linewidths=1,
fig_scale=2,
path_to_save=None,
show_fig=False):
'''Plots networkx graph.
Recognises special fat tree network and applies appropriate node positioning,
labelling, colouring etc.
Args:
network (networkx graph): Network object to be plotted.
draw_node_labels (bool): Whether or not to draw node labels on plot.
ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
linewidths (int,float): Width of edges in network.
fig_scale (int,float): Scaling factor to apply to plotted network.
path_to_save (str): Path to directory (with file name included) in which
to save generated plot. E.g. path_to_save='data/my_plot'
show_fig (bool): Whether or not to plot and show fig. If True, will
return and display fig.
Returns:
        matplotlib.figure.Figure: the plotted network figure.
'''
net_node_positions = init_network_node_positions(copy.deepcopy(network))
fig = plt.figure(figsize=[15*fig_scale,15*fig_scale])
# add nodes and edges
pos = {}
network_nodes = []
network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])
for nodes in list(network_nodes_dict.values()):
for network_node in nodes:
pos[network_node] = net_node_positions[network_node]
# network nodes
node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']) # server, rack, edge, agg, core
for node_type in network.graph['node_labels']:
nx.draw_networkx_nodes(network,
pos,
nodelist=network_nodes_dict[node_type],
node_size=network_node_size,
node_color=next(node_colours),
linewidths=linewidths,
label=node_type)
if draw_node_labels:
# nodes
nx.draw_networkx_labels(network,
pos,
font_size=font_size,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0)
# fibre links
fibre_links = list(network.edges)
nx.draw_networkx_edges(network,
pos,
edgelist=fibre_links,
edge_color='k',
width=3,
label='Fibre link')
if path_to_save is not None:
tools.pickle_data(path_to_save, fig)
if show_fig:
plt.show()
return fig
if __name__ == '__main__':
    #network = gen_simple_network()
    #network = gen_nsfnet_network()
    network = gen_fat_tree(k=4)  # k must be even for a perfect fat tree
    plot_network(network, draw_node_labels=True, show_fig=True)
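    # other topologies can be previewed the same way, e.g.:
    #   plot_network(gen_nsfnet_network(), show_fig=True)
    #   plot_network(gen_simple_network(), show_fig=True)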
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, 
agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n \"\"\"Generates channel names for channels on each link in network.\"\"\"\n channels = [(channel + 1) for channel in range(num_channels)]\n channel_names = [('channel_' + str(channel)) for channel in channels]\n return channel_names\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n        network_node_size (int,float): Size of plotted nodes.\n        font_size (int,float): Size of font of plotted labels etc.\n        linewidths (int,float): Width of edges in network.\n        fig_scale (int,float): Scaling factor to apply to plotted network.\n        path_to_save (str): Path to directory (with file name included) in which\n            to save generated plot. E.g. path_to_save='data/my_plot'\n        show_fig (bool): Whether or not to plot and show fig. If True, will\n            return and display fig.\n    \n    Returns:\n        matplotlib.figure.Figure: figure of the plotted network. \n\n    \"\"\"\n    net_node_positions = init_network_node_positions(copy.deepcopy(network))\n    fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n    pos = {}\n    network_nodes = []\n    network_nodes_dict = get_node_type_dict(network, network.graph[\n        'node_labels'])\n    for nodes in list(network_nodes_dict.values()):\n        for network_node in nodes:\n            pos[network_node] = net_node_positions[network_node]\n    node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n        )\n    for node_type in network.graph['node_labels']:\n        nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n            node_type], node_size=network_node_size, node_color=next(\n            node_colours), linewidths=linewidths, label=node_type)\n    if draw_node_labels:\n        nx.draw_networkx_labels(network, pos, font_size=font_size,\n            font_color='k', font_family='sans-serif', font_weight='normal',\n            alpha=1.0)\n    fibre_links = list(network.edges)\n    nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n        'k', width=3, label='Fibre link')\n    if path_to_save is not None:\n        tools.pickle_data(path_to_save, fig)\n    if show_fig:\n        plt.show()\n    return fig\n\n\n<mask token>\n",
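The fat-tree sizing formulas quoted in the gen_fat_tree docstring above can be sanity-checked with plain arithmetic; a minimal sketch, assuming example values k=4, L=4, n=4 (note the L==4 code path above actually overrides the pod count with num_pods = k):

k, L, n = 4, 4, 4  # switch radix, number of layers, servers per rack (example values)
num_cores = int((k / 2) ** (L / 2))      # (k/2)^(L/2) = 4
num_aggs = int(k ** 2 / 2)               # (k^2)/2 = 8
num_edges = int(k ** 2 / 2)              # (k^2)/2 = 8
num_pods = int(2 * (k / 2) ** (L - 2))   # docstring formula gives 8; the code later uses k = 4
num_racks = int(2 * (k / 2) ** (L - 1))  # 2*(k/2)^(L-1) = 16
num_servers = num_racks * n              # 16 * 4 = 64
print(num_cores, num_aggs, num_edges, num_pods, num_racks, num_servers)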
"step-4": "<mask token>\nfrom trafpy.generator.src import tools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, 
agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n \"\"\"Generates channel names for channels on each link in network.\"\"\"\n channels = [(channel + 1) for channel in range(num_channels)]\n channel_names = [('channel_' + str(channel)) for channel in channels]\n return channel_names\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n        network_node_size (int,float): Size of plotted nodes.\n        font_size (int,float): Size of font of plotted labels etc.\n        linewidths (int,float): Width of edges in network.\n        fig_scale (int,float): Scaling factor to apply to plotted network.\n        path_to_save (str): Path to directory (with file name included) in which\n            to save generated plot. E.g. path_to_save='data/my_plot'\n        show_fig (bool): Whether or not to plot and show fig. If True, will\n            return and display fig.\n    \n    Returns:\n        matplotlib.figure.Figure: figure of the plotted network. \n\n    \"\"\"\n    net_node_positions = init_network_node_positions(copy.deepcopy(network))\n    fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n    pos = {}\n    network_nodes = []\n    network_nodes_dict = get_node_type_dict(network, network.graph[\n        'node_labels'])\n    for nodes in list(network_nodes_dict.values()):\n        for network_node in nodes:\n            pos[network_node] = net_node_positions[network_node]\n    node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n        )\n    for node_type in network.graph['node_labels']:\n        nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n            node_type], node_size=network_node_size, node_color=next(\n            node_colours), linewidths=linewidths, label=node_type)\n    if draw_node_labels:\n        nx.draw_networkx_labels(network, pos, font_size=font_size,\n            font_color='k', font_family='sans-serif', font_weight='normal',\n            alpha=1.0)\n    fibre_links = list(network.edges)\n    nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n        'k', width=3, label='Fibre link')\n    if path_to_save is not None:\n        tools.pickle_data(path_to_save, fig)\n    if show_fig:\n        plt.show()\n    return fig\n\n\nif __name__ == '__main__':\n    network = gen_fat_tree(k=4)\n    plot_network(network, path_to_save='figures/graph/network_graph.png',\n        show_fig=True)\n",
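The bidirectional port layout that add_edge_capacity_attrs builds above can be reproduced and read back with plain networkx; a small standalone sketch, assuming made-up node names and a 500-byte example capacity:

import networkx as nx

g = nx.Graph()
g.add_edge('server_0', 'rack_0')
channel_capacity = 500
# one attribute dict per link direction, each with half the link capacity
attrs = {('server_0', 'rack_0'): {
    'server_0_to_rack_0_port': {
        'channels': {'channel_1': channel_capacity / 2},
        'max_channel_capacity': channel_capacity / 2},
    'rack_0_to_server_0_port': {
        'channels': {'channel_1': channel_capacity / 2},
        'max_channel_capacity': channel_capacity / 2}}}
nx.set_edge_attributes(g, attrs)
# read back one direction, as described in the add_edges_capacity_attrs docstring
print(g['server_0']['rack_0']['server_0_to_rack_0_port']['channels']['channel_1'])  # 250.0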
"step-5": "'''Module for generating and plotting networks.'''\n\nfrom trafpy.generator.src import tools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\n\n\ndef gen_arbitrary_network(num_eps,\n ep_label=None, \n ep_capacity=12500, \n num_channels=1, \n racks_dict=None,\n topology_type=None):\n '''Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n '''\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n \n if ep_label is None:\n # must be str or not json serialisable\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [ep_label+'_'+str(i) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n # ep_label is None\n eps.append(node)\n network.graph['endpoints'] = eps\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2\n\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(num_eps, ep_capacity, num_channels)\n\n init_global_network_attrs(network,\n max_nw_capacity,\n num_channels,\n ep_link_capacity=ep_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label],\n racks_dict=racks_dict,\n topology_type=topology_type)\n \n return network\n\n\n\ndef gen_nsfnet_network(ep_label='server', \n rack_label='rack',\n N=0, \n num_channels=2, \n server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10,\n show_fig=False):\n '''Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n '''\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n\n node_pair_list = [[0,1],\n [0,3],\n [0,2],\n [1,2],\n [1,7],\n [3,8],\n [3,4],\n [3,6],\n [4,5],\n [4,5],\n [5,2],\n [5,13],\n [5,12],\n [6,7],\n [7,10],\n [8,11],\n [8,9],\n [9,10],\n [9,12],\n [10,11],\n [10,13],\n [11,12]]\n\n if N == 0:\n # above nodes are all end points\n label = ep_label\n else:\n # above nodes are ToR switch nodes\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n\n # add 14 nodes\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n\n if N == 0:\n # assume all nodes are servers\n racks_dict = None\n else:\n # each of 14 nodes in NSFNET is a ToR switch\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label+'_'+str(i))\n network.add_edge(ep_label+'_'+str(i), rack_label+'_'+str(rack))\n i += 1\n \n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names, rack_to_rack_channel_capacity)\n\n # set gloabl network attrs\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (len(network.edges) * num_channels * rack_to_rack_channel_capacity) / 2\n\n init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet',\n racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n \n return network\n\ndef gen_simple_network(ep_label='server', \n num_channels=2, \n server_to_rack_channel_capacity=500,\n show_fig=False):\n '''Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. 
If True, will\n            display fig.\n\n    Returns:\n        networkx graph: network object\n\n    '''\n    network = nx.Graph()\n    network.add_nodes_from([node for node in range(5)])\n    network.add_edges_from([(0,1),\n                            (0,2),\n                            (1,2),\n                            (2,4),\n                            (4,3),\n                            (3,1)],weight=1)\n    servers = [ep_label+'_'+str(i) for i in range(5)]\n    relabel_mapping = {node: label for node, label in zip(range(5),servers)}\n    network = nx.relabel_nodes(network, relabel_mapping)\n\n    channel_names = gen_channel_names(num_channels)\n    edges = [edge for edge in network.edges]\n    add_edges_capacity_attrs(network, edges, channel_names, server_to_rack_channel_capacity)\n\n    # set global network attrs\n    network.graph['endpoints'] = get_endpoints(network, ep_label)\n\n    # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n    max_nw_capacity = (len(network.edges) * num_channels * server_to_rack_channel_capacity) / 2\n\n    init_global_network_attrs(network, \n                              max_nw_capacity, \n                              num_channels, \n                              ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n                              endpoint_label=ep_label,\n                              node_labels=[ep_label],\n                              topology_type='5_node_simple_network')\n\n    if show_fig:\n        plot_network(network, show_fig=True)\n\n    \n    return network\n\ndef get_endpoints(network, ep_label):\n    '''Gets list of endpoints of network.\n\n    Args:\n        network (networkx graph): Networkx object.\n        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n            ep_label prepended to the start of their label (e.g. 'server_0', 'server_1', ...).\n\n    Returns:\n        eps (list): List of endpoints.\n\n    '''\n    eps = []\n    for node in list(network.nodes):\n        if ep_label in node:\n            eps.append(node)\n\n    return eps\n\ndef gen_fat_tree(k=4,\n                 L=2,\n                 n=4,\n                 ep_label='server',\n                 rack_label='rack',\n                 edge_label='edge',\n                 aggregate_label='agg',\n                 core_label='core',\n                 num_channels = 2,\n                 server_to_rack_channel_capacity=500,\n                 rack_to_edge_channel_capacity=1000,\n                 edge_to_agg_channel_capacity=1000,\n                 agg_to_core_channel_capacity=2000,\n                 rack_to_core_channel_capacity=2000,\n                 show_fig=False):\n    '''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n    Top layer is always core (spine) switch layer, bottom layer is always\n    ToR (leaf) layer.\n\n    L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n    N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n    Resource for building (scroll down to summary table with equations):\n\n    https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n    Another good resource for data centre topologies etc. in general:\n\n    https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n    Parameters of network:\n\n    - number of core (spine) switches = (k/2)^(L/2) (top layer)\n    - number of edge switches (if L=4) = (k^2)/2\n    - number of agg switches (if L=4) = (k^2)/2\n    - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n    - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n    - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n    - number of servers = number ToR switches * n\n\n    Args:\n        k (int): Number of ports (links) on each switch (both up and down).\n        L (int): Number of layers in the fat tree.\n        n (int): Number of servers per rack.\n        ep_label (str,int,float): Endpoint label (e.g. 'server'). 
All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n '''\n if L != 2 and L != 4:\n raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))\n if k % 2 != 0:\n raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))\n\n channel_names = gen_channel_names(num_channels)\n\n # initialise network nodes\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]\n\n #num_cores = int((k/2)**(L-1))\n #num_cores = int((k/2)**2)\n num_cores = int((k/2)**(L/2))\n num_aggs = int((k**2)/2)\n num_edges = int((k**2)/2)\n num_pods = int(2*(k/2)**(L-2))\n num_racks = int(2*(k/2)**(L-1))\n num_servers = int(num_racks * n)\n\n cores = [core_label+'_'+str(i) for i in range(num_cores)]\n aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]\n edges = [edge_label+'_'+str(i) for i in range(num_edges)]\n racks = [rack_label+'_'+str(i) for i in range(num_racks)]\n servers = [ep_label+'_'+str(i) for i in range(num_servers)]\n\n # create core and rack layer networks\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n\n # combine cores and racks into single network\n fat_tree_network = nx.compose(core_layer, rack_layer)\n \n if L == 2:\n # 2 layers: Core, ToR\n # link racks to cores, add link attributes\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n # have k/2 up-ports on each switch\n for up_port in range(int(k/2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network,\n (rack, core),\n channel_names,\n rack_to_core_channel_capacity)\n else:\n # 4 layers: Core, Agg, Edge, ToR. 
Agg and Edge switches grouped into pods.\n # group edges and aggregates into pods\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + (k/2))\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n\n # create dict of pod networks\n pod_labels = ['pod_'+str(i) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = ('pod_'+str(pod_iter),)\n pod_edges = pods[pod_iter][0]\n\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n\n # connect edge and aggregate switches within pod, add link attributes\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], \n (pod_agg,pod_edge), \n channel_names, \n edge_to_agg_channel_capacity)\n\n # add pods (agg + edge) layer to fat-tree\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])\n\n # link aggregate switches in pods to core switches, add link attributes\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network,\n (core,pod_agg),\n channel_names,\n agg_to_core_channel_capacity)\n\n # link edge switches in pods to racks, add link attributes\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network,\n (pod_edge,rack),\n channel_names,\n rack_to_edge_channel_capacity)\n\n # link servers to racks, add link attributes\n racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network,\n (rack, server),\n channel_names,\n server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n\n # calc total network capacity\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2\n\n\n # init global network attrs\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=node_labels,\n topology_type='fat_tree',\n racks_dict=racks_dict)\n\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n\n return fat_tree_network\n\n \n\n\ndef init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity,\n endpoint_label = 'server',\n topology_type='unknown', \n node_labels=['server'],\n racks_dict=None):\n '''Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n 
def init_global_network_attrs(network,
                              max_nw_capacity,
                              num_channels,
                              ep_link_capacity,
                              endpoint_label='server',
                              topology_type='unknown',
                              node_labels=['server'],
                              racks_dict=None):
    '''Initialises the standard global network attributes of a given network.

    Args:
        network (obj): NetworkX object.
        max_nw_capacity (int/float): Maximum rate at which info can be reliably
            transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        ep_link_capacity (int/float): Total capacity (across all channels) of
            each endpoint link.
        endpoint_label (str): Label of endpoint nodes (e.g. 'server').
        topology_type (str): Label of network topology (e.g. 'fat_tree').
        node_labels (list): Label classes assigned to network nodes
            (e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system with multiple servers per rack.

    '''
    network.graph['endpoint_label'] = endpoint_label
    network.graph['num_channels_per_link'] = num_channels
    network.graph['ep_link_capacity'] = ep_link_capacity
    network.graph['ep_link_port_capacity'] = ep_link_capacity / 2  # all eps have a src & a dst port
    network.graph['max_nw_capacity'] = max_nw_capacity
    network.graph['curr_nw_capacity_used'] = 0
    network.graph['num_active_connections'] = 0
    network.graph['total_connections_blocked'] = 0
    network.graph['node_labels'] = node_labels
    network.graph['topology_type'] = topology_type
    network.graph['channel_names'] = gen_channel_names(num_channels)

    if racks_dict is not None:
        # ensure racks dict keys/values are str so json serialisable
        _racks_dict = {}
        for key, val in racks_dict.items():
            _racks_dict[str(key)] = [str(v) for v in val]
        network.graph['rack_to_ep_dict'] = _racks_dict

        # switch racks_dict keys and values to make hashing easier
        ep_to_rack_dict = {}
        for key, val in _racks_dict.items():
            for v in val:
                if v not in ep_to_rack_dict.keys():
                    ep_to_rack_dict[v] = key
        network.graph['ep_to_rack_dict'] = ep_to_rack_dict
    else:
        network.graph['rack_to_ep_dict'] = None
        network.graph['ep_to_rack_dict'] = None


def gen_channel_names(num_channels):
    '''Generates channel names for channels on each link in network.'''
    channels = [channel+1 for channel in range(num_channels)]
    channel_names = ['channel_' + str(channel) for channel in channels]

    return channel_names


def add_edge_capacity_attrs(network,
                            edge,
                            channel_names,
                            channel_capacity,
                            bidirectional_links=True):
    '''Adds channels and corresponding max channel capacities to a single edge in network.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edge (tuple): Node-node edge pair.
        channel_names (list): List of channel names to add to edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has its capacity split equally
            between a src and a dst port. I.e. all links have a src and dst port
            which are treated separately for incoming and outgoing traffic to and
            from a given node (switch or server).

    '''
    if bidirectional_links:
        attrs = {edge:
                    {'{}_to_{}_port'.format(edge[0], edge[1]):
                        {'channels':
                            {channel: channel_capacity/2 for channel in channel_names},
                         'max_channel_capacity': channel_capacity/2
                        },
                     '{}_to_{}_port'.format(edge[1], edge[0]):
                        {'channels':
                            {channel: channel_capacity/2 for channel in channel_names},
                         'max_channel_capacity': channel_capacity/2
                        }
                    }
                }
    else:
        attrs = {edge:
                    {'channels': {channel: channel_capacity for channel in channel_names},
                     'max_channel_capacity': channel_capacity}}

    nx.set_edge_attributes(network, attrs)
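
# --- Usage sketch (illustrative; not part of the original module) -----------
# Demonstrates the per-port channel attribute scheme that
# add_edge_capacity_attrs() sets up, on a toy two-node graph. With the default
# bidirectional_links=True, each direction's port receives half the capacity.
def _example_edge_capacity_attrs():
    g = nx.Graph()
    g.add_edge('a', 'b')
    channel_names = gen_channel_names(2)  # ['channel_1', 'channel_2']
    add_edge_capacity_attrs(g, ('a', 'b'), channel_names, channel_capacity=10)
    print(g['a']['b']['a_to_b_port']['channels'])              # {'channel_1': 5.0, 'channel_2': 5.0}
    print(g['a']['b']['b_to_a_port']['max_channel_capacity'])  # 5.0
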
def add_edges_capacity_attrs(network,
                             edges,
                             channel_names,
                             channel_capacity,
                             bidirectional_links=True):
    '''Adds channels & max channel capacities to multiple edges in network.

    To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
    would index the network with network[0][1]

    To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
    would do network[0][1]['channels']['channel_1']
    OR
    if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
    of the link you want to access.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edges (list): List of node pairs in tuples.
        channel_names (list of str): List of channel names to add to each edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has its capacity split equally
            between a src and a dst port. I.e. all links have a src and dst port
            which are treated separately for incoming and outgoing traffic to and
            from a given node (switch or server).

    '''
    if bidirectional_links:
        attrs = {edge:
                    {'{}_to_{}_port'.format(edge[0], edge[1]):
                        {'channels':
                            {channel: channel_capacity/2 for channel in channel_names},
                         'max_channel_capacity': channel_capacity/2
                        },
                     '{}_to_{}_port'.format(edge[1], edge[0]):
                        {'channels':
                            {channel: channel_capacity/2 for channel in channel_names},
                         'max_channel_capacity': channel_capacity/2
                        }
                    }
                 for edge in edges}
    else:
        attrs = {edge:
                    {'channels':
                        {channel: channel_capacity for channel in channel_names},
                     'max_channel_capacity': channel_capacity
                    } for edge in edges}

    nx.set_edge_attributes(network, attrs)


def get_node_type_dict(network, node_types=[]):
    '''Gets dict mapping each node type to the list of that type's nodes in the graph.'''
    network_nodes = []
    for network_node in network.nodes:
        network_nodes.append(network_node)
    network_nodes_dict = {node_type: [] for node_type in node_types}
    for n in network_nodes:
        for node_type in node_types:
            if node_type in n:
                network_nodes_dict[node_type].append(n)
            else:
                # not this node type
                pass

    return network_nodes_dict


def get_fat_tree_positions(net, width_scale=500, height_scale=10):
    '''Gets networkx positions of nodes in fat tree network for plotting.'''
    pos = {}

    node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
    node_types = list(node_type_dict.keys())

    heights = {}  # dict for height separation between fat tree layers
    widths = {}  # dict for width separation between nodes within layers
    h = iter([1, 2, 3, 4, 5])  # server, rack, edge, agg, core heights
    for node_type in node_types:
        heights[node_type] = next(h)
        widths[node_type] = 1/(len(node_type_dict[node_type])+1)
        idx = 0
        for node in node_type_dict[node_type]:
            pos[node] = ((idx+1)*widths[node_type]*width_scale, heights[node_type]*height_scale)
            idx += 1

    return pos


def init_network_node_positions(net):
    '''Initialises network node positions for plotting.'''
    if net.graph['topology_type'] == 'fat_tree':
        pos = get_fat_tree_positions(net)
    else:
        pos = nx.nx_agraph.graphviz_layout(net, prog='neato')

    return pos
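
# --- Usage sketch (illustrative; not part of the original module) -----------
# get_node_type_dict() groups nodes by substring match on their labels, so a
# node named 'server_0' is matched by the node type 'server'.
def _example_node_type_dict():
    g = nx.Graph()
    g.add_nodes_from(['server_0', 'server_1', 'rack_0'])
    print(get_node_type_dict(g, node_types=['server', 'rack']))
    # -> {'server': ['server_0', 'server_1'], 'rack': ['rack_0']}
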
def plot_network(network,
                 draw_node_labels=True,
                 ep_label='server',
                 network_node_size=2000,
                 font_size=30,
                 linewidths=1,
                 fig_scale=2,
                 path_to_save=None,
                 show_fig=False):
    '''Plots networkx graph.

    Recognises special fat tree network and applies appropriate node positioning,
    labelling, colouring etc.

    Args:
        network (networkx graph): Network object to be plotted.
        draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
        linewidths (int,float): Width of edges in network.
        fig_scale (int,float): Scaling factor to apply to plotted network.
        path_to_save (str): Path to directory (with file name included) in which
            to save generated plot. E.g. path_to_save='data/my_plot'
        show_fig (bool): Whether or not to plot and show fig. If True, will
            return and display fig.

    Returns:
        matplotlib.figure.Figure: figure of the plotted network graph.

    '''
    net_node_positions = init_network_node_positions(copy.deepcopy(network))

    fig = plt.figure(figsize=[15*fig_scale, 15*fig_scale])

    # add nodes and edges
    pos = {}
    network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])
    for nodes in list(network_nodes_dict.values()):
        for network_node in nodes:
            pos[network_node] = net_node_positions[network_node]

    # network nodes
    node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63'])  # server, rack, edge, agg, core
    for node_type in network.graph['node_labels']:
        nx.draw_networkx_nodes(network,
                               pos,
                               nodelist=network_nodes_dict[node_type],
                               node_size=network_node_size,
                               node_color=next(node_colours),
                               linewidths=linewidths,
                               label=node_type)
    if draw_node_labels:
        # node labels
        nx.draw_networkx_labels(network,
                                pos,
                                font_size=font_size,
                                font_color='k',
                                font_family='sans-serif',
                                font_weight='normal',
                                alpha=1.0)

    # fibre links
    fibre_links = list(network.edges)
    nx.draw_networkx_edges(network,
                           pos,
                           edgelist=fibre_links,
                           edge_color='k',
                           width=3,
                           label='Fibre link')

    if path_to_save is not None:
        tools.pickle_data(path_to_save, fig)

    if show_fig:
        plt.show()

    return fig


if __name__ == '__main__':
    #network = gen_simple_network()
    #network = gen_nsfnet_network()
    network = gen_fat_tree(k=3)  # note: canonical fat-trees use an even k

    plot_network(network, path_to_save='figures/graph/network_graph', show_fig=True)
"step-ids": [
11,
12,
13,
15,
16
]
}
|
[
11,
12,
13,
15,
16
] |